/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.84"
#define DRV_MODULE_RELDATE      "October 12, 2007"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
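/* NEXT_TX is the mask form of '((N) + 1) % TG3_TX_RING_SIZE'; it is only
 * valid because TG3_TX_RING_SIZE is a power of two, which is exactly the
 * shift-and-mask optimization the comment above is after.
 */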

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
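/* Typical use (illustrative only): toggling GPIOs through GRC_LOCAL_CTRL
 * needs the post-write settle time, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 *
 * which resolves to _tw32_flush() above: a posted write, a read-back to
 * flush it, and a 100 usec delay (or just the delay when the write has
 * to be issued non-posted because of chip bugs).
 */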

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
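/* Register accessor shorthands used throughout the driver.  tw32()/tr32()
 * go through the per-chip function pointers (direct MMIO or the indirect
 * config-space path), tw32_f() forces a read-back flush, and
 * tw32_wait_f() additionally waits for registers such as TG3PCI_CLOCK_CTRL
 * that need time to settle.
 */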

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
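/* Example (illustrative): the firmware handshake in NIC SRAM is polled
 * through this pair, e.g.
 *
 *	tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 *
 * as done in tg3_set_power_state() below.  Both helpers serialize on
 * indirect_lock because they share the single memory window registers.
 */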

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
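/* Expected usage pattern (a sketch, not a definitive recipe): callers
 * bracket accesses to memory shared with the APE firmware, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... touch the shared region ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * On chips without APE support both calls are no-ops.
 */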

static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
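/* Illustrative MDIO read (a sketch, not driver code): fetching the basic
 * mode status register would look like
 *
 *	u32 bmsr;
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is reported up ...
 *
 * The frame written to MAC_MI_COM packs the PHY address, register number,
 * command and start bit; completion is signalled by MI_COM_BUSY clearing.
 */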

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
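/* Many of the PHY workarounds below use a read-modify-write sequence on
 * shadow/auxiliary control registers: tg3_writephy() to select the shadow
 * page, tg3_readphy() to fetch the current value, then tg3_writephy()
 * again with the modified bits (tg3_phy_toggle_automdix() below is one
 * example of the pattern).
 */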

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

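/* Write a known DSP test pattern to each of the four channels and read it
 * back through the DSP read/write port.  On a timeout waiting for the DSP
 * macro, *resetp is set so the caller resets the PHY before the next
 * attempt; a read-back mismatch simply fails this attempt with -EBUSY.
 */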
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

static void tg3_link_report(struct tg3 *);

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}

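/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given link
 * speed on 5700-class devices; the answer depends on the LED mode and on
 * whether a BCM5411 PHY is fitted (see the WoL setup in
 * tg3_set_power_state() for a caller).
 */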
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
        if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
                return 1;
        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
                if (speed != SPEED_10)
                        return 1;
        } else if (speed == SPEED_10)
                return 1;

        return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;
        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

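/* Move the device into the requested PCI power state.  For D0 this just
 * programs the PM control register and switches off Vaux on NIC-style
 * boards; for the low power states it also masks PCI interrupts, parks a
 * copper PHY at 10Mb half duplex where possible, and arms the WoL and
 * magic-packet machinery.
 */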
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case PCI_D0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is a NIC */
                if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

                return 0;

        case PCI_D1:
                power_control |= 1;
                break;

        case PCI_D2:
                power_control |= 2;
                break;

        case PCI_D3hot:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        }
1387
1388         power_control |= PCI_PM_CTRL_PME_ENABLE;
1389
1390         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1391         tw32(TG3PCI_MISC_HOST_CTRL,
1392              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1393
1394         if (tp->link_config.phy_is_low_power == 0) {
1395                 tp->link_config.phy_is_low_power = 1;
1396                 tp->link_config.orig_speed = tp->link_config.speed;
1397                 tp->link_config.orig_duplex = tp->link_config.duplex;
1398                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1399         }
1400
1401         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1402                 tp->link_config.speed = SPEED_10;
1403                 tp->link_config.duplex = DUPLEX_HALF;
1404                 tp->link_config.autoneg = AUTONEG_ENABLE;
1405                 tg3_setup_phy(tp, 0);
1406         }
1407
1408         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1409                 u32 val;
1410
1411                 val = tr32(GRC_VCPU_EXT_CTRL);
1412                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1413         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1414                 int i;
1415                 u32 val;
1416
1417                 for (i = 0; i < 200; i++) {
1418                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1419                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1420                                 break;
1421                         msleep(1);
1422                 }
1423         }
1424         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1425                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1426                                                      WOL_DRV_STATE_SHUTDOWN |
1427                                                      WOL_DRV_WOL |
1428                                                      WOL_SET_MAGIC_PKT);
1429
1430         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1431
1432         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1433                 u32 mac_mode;
1434
1435                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1436                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1437                         udelay(40);
1438
1439                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1440                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1441                         else
1442                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1443
1444                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1445                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1446                             ASIC_REV_5700) {
1447                                 u32 speed = (tp->tg3_flags &
1448                                              TG3_FLAG_WOL_SPEED_100MB) ?
1449                                              SPEED_100 : SPEED_10;
1450                                 if (tg3_5700_link_polarity(tp, speed))
1451                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1452                                 else
1453                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1454                         }
1455                 } else {
1456                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1457                 }
1458
1459                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1460                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1461
1462                 if ((power_caps & PCI_PM_CAP_PME_D3cold) &&
1463                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
1464                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1465
1466                 tw32_f(MAC_MODE, mac_mode);
1467                 udelay(100);
1468
1469                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1470                 udelay(10);
1471         }
1472
1473         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1474             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1475              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1476                 u32 base_val;
1477
1478                 base_val = tp->pci_clock_ctrl;
1479                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1480                              CLOCK_CTRL_TXCLK_DISABLE);
1481
1482                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1483                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1484         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1485                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1486                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1487                 /* do nothing */
1488         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1489                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1490                 u32 newbits1, newbits2;
1491
1492                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1493                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1494                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1495                                     CLOCK_CTRL_TXCLK_DISABLE |
1496                                     CLOCK_CTRL_ALTCLK);
1497                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1498                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1499                         newbits1 = CLOCK_CTRL_625_CORE;
1500                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1501                 } else {
1502                         newbits1 = CLOCK_CTRL_ALTCLK;
1503                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1504                 }
1505
1506                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1507                             40);
1508
1509                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1510                             40);
1511
1512                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1513                         u32 newbits3;
1514
1515                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1516                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1517                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1518                                             CLOCK_CTRL_TXCLK_DISABLE |
1519                                             CLOCK_CTRL_44MHZ_CORE);
1520                         } else {
1521                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1522                         }
1523
1524                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1525                                     tp->pci_clock_ctrl | newbits3, 40);
1526                 }
1527         }
1528
1529         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1530             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1531             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1532                 tg3_power_down_phy(tp);
1533
1534         tg3_frob_aux_power(tp);
1535
1536         /* Workaround for unstable PLL clock */
1537         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1538             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1539                 u32 val = tr32(0x7d00);
1540
1541                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1542                 tw32(0x7d00, val);
1543                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1544                         int err;
1545
1546                         err = tg3_nvram_lock(tp);
1547                         tg3_halt_cpu(tp, RX_CPU_BASE);
1548                         if (!err)
1549                                 tg3_nvram_unlock(tp);
1550                 }
1551         }
1552
1553         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1554
1555         /* Finally, set the new power state. */
1556         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1557         udelay(100);    /* Delay after power state change */
1558
1559         return 0;
1560 }
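
/* A minimal usage sketch, assuming the usual PCI suspend path: the PM
 * state requested by the core is translated with pci_choose_state()
 * and handed to tg3_set_power_state().  Illustrative only (not built);
 * tg3_suspend_sketch() is a hypothetical name, the real suspend and
 * resume handlers live elsewhere in this file.
 */
#if 0
static int tg3_suspend_sketch(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	/* ... detach the interface and quiesce the hardware first ... */
	return tg3_set_power_state(tp, pci_choose_state(pdev, state));
}
#endif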
1561
1562 static void tg3_link_report(struct tg3 *tp)
1563 {
1564         if (!netif_carrier_ok(tp->dev)) {
1565                 if (netif_msg_link(tp))
1566                         printk(KERN_INFO PFX "%s: Link is down.\n",
1567                                tp->dev->name);
1568         } else if (netif_msg_link(tp)) {
1569                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1570                        tp->dev->name,
1571                        (tp->link_config.active_speed == SPEED_1000 ?
1572                         1000 :
1573                         (tp->link_config.active_speed == SPEED_100 ?
1574                          100 : 10)),
1575                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1576                         "full" : "half"));
1577
1578                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1579                        "%s for RX.\n",
1580                        tp->dev->name,
1581                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1582                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1583         }
1584 }
1585
1586 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1587 {
1588         u32 new_tg3_flags = 0;
1589         u32 old_rx_mode = tp->rx_mode;
1590         u32 old_tx_mode = tp->tx_mode;
1591
1592         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1593
1594                 /* Convert 1000BaseX flow control bits to 1000BaseT
1595                  * bits before resolving flow control.
1596                  */
1597                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1598                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1599                                        ADVERTISE_PAUSE_ASYM);
1600                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1601
1602                         if (local_adv & ADVERTISE_1000XPAUSE)
1603                                 local_adv |= ADVERTISE_PAUSE_CAP;
1604                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1605                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1606                         if (remote_adv & LPA_1000XPAUSE)
1607                                 remote_adv |= LPA_PAUSE_CAP;
1608                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1609                                 remote_adv |= LPA_PAUSE_ASYM;
1610                 }
1611
1612                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1613                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1614                                 if (remote_adv & LPA_PAUSE_CAP)
1615                                         new_tg3_flags |=
1616                                                 (TG3_FLAG_RX_PAUSE |
1617                                                 TG3_FLAG_TX_PAUSE);
1618                                 else if (remote_adv & LPA_PAUSE_ASYM)
1619                                         new_tg3_flags |=
1620                                                 (TG3_FLAG_RX_PAUSE);
1621                         } else {
1622                                 if (remote_adv & LPA_PAUSE_CAP)
1623                                         new_tg3_flags |=
1624                                                 (TG3_FLAG_RX_PAUSE |
1625                                                 TG3_FLAG_TX_PAUSE);
1626                         }
1627                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1628                         if ((remote_adv & LPA_PAUSE_CAP) &&
1629                             (remote_adv & LPA_PAUSE_ASYM))
1630                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1631                 }
1632
1633                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1634                 tp->tg3_flags |= new_tg3_flags;
1635         } else {
1636                 new_tg3_flags = tp->tg3_flags;
1637         }
1638
1639         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1640                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1641         else
1642                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1643
1644         if (old_rx_mode != tp->rx_mode) {
1645                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1646         }
1647
1648         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1649                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1650         else
1651                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1652
1653         if (old_tx_mode != tp->tx_mode) {
1654                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1655         }
1656 }
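
/* The pause resolution above follows the standard 802.3 rules: both
 * sides advertising symmetric pause enables flow control in both
 * directions, while the asymmetric combinations enable it in one
 * direction only.  A condensed sketch of the same decision, equivalent
 * to the nested ifs above (illustrative only, not built; resolve_pause()
 * is a hypothetical helper):
 */
#if 0
static u32 resolve_pause(u32 local_adv, u32 remote_adv)
{
	u32 flags = 0;

	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (remote_adv & LPA_PAUSE_CAP)
			flags = TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE;
		else if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
			 (remote_adv & LPA_PAUSE_ASYM))
			flags = TG3_FLAG_RX_PAUSE;
	} else if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
		   (remote_adv & LPA_PAUSE_CAP) &&
		   (remote_adv & LPA_PAUSE_ASYM)) {
		flags = TG3_FLAG_TX_PAUSE;
	}
	return flags;
}
#endif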
1657
1658 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1659 {
1660         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1661         case MII_TG3_AUX_STAT_10HALF:
1662                 *speed = SPEED_10;
1663                 *duplex = DUPLEX_HALF;
1664                 break;
1665
1666         case MII_TG3_AUX_STAT_10FULL:
1667                 *speed = SPEED_10;
1668                 *duplex = DUPLEX_FULL;
1669                 break;
1670
1671         case MII_TG3_AUX_STAT_100HALF:
1672                 *speed = SPEED_100;
1673                 *duplex = DUPLEX_HALF;
1674                 break;
1675
1676         case MII_TG3_AUX_STAT_100FULL:
1677                 *speed = SPEED_100;
1678                 *duplex = DUPLEX_FULL;
1679                 break;
1680
1681         case MII_TG3_AUX_STAT_1000HALF:
1682                 *speed = SPEED_1000;
1683                 *duplex = DUPLEX_HALF;
1684                 break;
1685
1686         case MII_TG3_AUX_STAT_1000FULL:
1687                 *speed = SPEED_1000;
1688                 *duplex = DUPLEX_FULL;
1689                 break;
1690
1691         default:
1692                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1694                                  SPEED_10;
1695                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1696                                   DUPLEX_HALF;
1697                         break;
1698                 }
1699                 *speed = SPEED_INVALID;
1700                 *duplex = DUPLEX_INVALID;
1701                 break;
1702         }
1703 }
1704
1705 static void tg3_phy_copper_begin(struct tg3 *tp)
1706 {
1707         u32 new_adv;
1708         int i;
1709
1710         if (tp->link_config.phy_is_low_power) {
1711                 /* Entering low power mode.  Disable gigabit and
1712                  * 100baseT advertisements.
1713                  */
1714                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1715
1716                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1717                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1718                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1719                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1720
1721                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1722         } else if (tp->link_config.speed == SPEED_INVALID) {
1723                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1724                         tp->link_config.advertising &=
1725                                 ~(ADVERTISED_1000baseT_Half |
1726                                   ADVERTISED_1000baseT_Full);
1727
1728                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1729                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1730                         new_adv |= ADVERTISE_10HALF;
1731                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1732                         new_adv |= ADVERTISE_10FULL;
1733                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1734                         new_adv |= ADVERTISE_100HALF;
1735                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1736                         new_adv |= ADVERTISE_100FULL;
1737                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1738
1739                 if (tp->link_config.advertising &
1740                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1741                         new_adv = 0;
1742                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1743                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1744                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1745                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1746                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1747                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1748                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1749                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1750                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1751                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1752                 } else {
1753                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1754                 }
1755         } else {
1756                 /* Asking for a specific link mode. */
1757                 if (tp->link_config.speed == SPEED_1000) {
1758                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1759                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1760
1761                         if (tp->link_config.duplex == DUPLEX_FULL)
1762                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1763                         else
1764                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1765                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1766                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1767                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1768                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1769                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1770                 } else {
1771                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1772
1773                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1774                         if (tp->link_config.speed == SPEED_100) {
1775                                 if (tp->link_config.duplex == DUPLEX_FULL)
1776                                         new_adv |= ADVERTISE_100FULL;
1777                                 else
1778                                         new_adv |= ADVERTISE_100HALF;
1779                         } else {
1780                                 if (tp->link_config.duplex == DUPLEX_FULL)
1781                                         new_adv |= ADVERTISE_10FULL;
1782                                 else
1783                                         new_adv |= ADVERTISE_10HALF;
1784                         }
1785                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1786                 }
1787         }
1788
1789         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1790             tp->link_config.speed != SPEED_INVALID) {
1791                 u32 bmcr, orig_bmcr;
1792
1793                 tp->link_config.active_speed = tp->link_config.speed;
1794                 tp->link_config.active_duplex = tp->link_config.duplex;
1795
1796                 bmcr = 0;
1797                 switch (tp->link_config.speed) {
1798                 default:
1799                 case SPEED_10:
1800                         break;
1801
1802                 case SPEED_100:
1803                         bmcr |= BMCR_SPEED100;
1804                         break;
1805
1806                 case SPEED_1000:
1807                         bmcr |= TG3_BMCR_SPEED1000;
1808                         break;
1809                 }
1810
1811                 if (tp->link_config.duplex == DUPLEX_FULL)
1812                         bmcr |= BMCR_FULLDPLX;
1813
1814                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1815                     (bmcr != orig_bmcr)) {
1816                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1817                         for (i = 0; i < 1500; i++) {
1818                                 u32 tmp;
1819
1820                                 udelay(10);
1821                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1822                                     tg3_readphy(tp, MII_BMSR, &tmp))
1823                                         continue;
1824                                 if (!(tmp & BMSR_LSTATUS)) {
1825                                         udelay(40);
1826                                         break;
1827                                 }
1828                         }
1829                         tg3_writephy(tp, MII_BMCR, bmcr);
1830                         udelay(40);
1831                 }
1832         } else {
1833                 tg3_writephy(tp, MII_BMCR,
1834                              BMCR_ANENABLE | BMCR_ANRESTART);
1835         }
1836 }
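
/* For the forced-mode path at the end of tg3_phy_copper_begin(), the
 * BMCR value is derived purely from the requested speed and duplex.
 * A compact equivalent of that switch, shown for illustration only
 * (not built; tg3_forced_bmcr() is a hypothetical name):
 */
#if 0
static u32 tg3_forced_bmcr(u16 speed, u8 duplex)
{
	u32 bmcr = 0;	/* SPEED_10 is the all-zero default */

	if (speed == SPEED_100)
		bmcr |= BMCR_SPEED100;
	else if (speed == SPEED_1000)
		bmcr |= TG3_BMCR_SPEED1000;
	if (duplex == DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;
}
#endif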
1837
1838 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1839 {
1840         int err;
1841
1842         /* Turn off tap power management. */
1843         /* Set Extended packet length bit */
1844         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1845
1846         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1847         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1848
1849         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1850         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1851
1852         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1853         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1854
1855         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1856         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1857
1858         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1859         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1860
1861         udelay(40);
1862
1863         return err;
1864 }
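
/* Apart from the initial MII_TG3_AUX_CTRL write, the 5401 DSP setup
 * above is a fixed series of (address, value) pairs pushed through the
 * DSP address/data window.  A table-driven sketch of that sequence,
 * for illustration only (not built; tg3_write_dsp_table() is a
 * hypothetical name):
 */
#if 0
static int tg3_write_dsp_table(struct tg3 *tp)
{
	static const u16 dsp_init[][2] = {
		{ 0x0012, 0x1804 },
		{ 0x0013, 0x1204 },
		{ 0x8006, 0x0132 },
		{ 0x8006, 0x0232 },
		{ 0x201f, 0x0a20 },
	};
	int i, err = 0;

	for (i = 0; i < ARRAY_SIZE(dsp_init); i++) {
		err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, dsp_init[i][0]);
		err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, dsp_init[i][1]);
	}
	return err;
}
#endif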
1865
1866 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1867 {
1868         u32 adv_reg, all_mask = 0;
1869
1870         if (mask & ADVERTISED_10baseT_Half)
1871                 all_mask |= ADVERTISE_10HALF;
1872         if (mask & ADVERTISED_10baseT_Full)
1873                 all_mask |= ADVERTISE_10FULL;
1874         if (mask & ADVERTISED_100baseT_Half)
1875                 all_mask |= ADVERTISE_100HALF;
1876         if (mask & ADVERTISED_100baseT_Full)
1877                 all_mask |= ADVERTISE_100FULL;
1878
1879         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1880                 return 0;
1881
1882         if ((adv_reg & all_mask) != all_mask)
1883                 return 0;
1884         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1885                 u32 tg3_ctrl;
1886
1887                 all_mask = 0;
1888                 if (mask & ADVERTISED_1000baseT_Half)
1889                         all_mask |= ADVERTISE_1000HALF;
1890                 if (mask & ADVERTISED_1000baseT_Full)
1891                         all_mask |= ADVERTISE_1000FULL;
1892
1893                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1894                         return 0;
1895
1896                 if ((tg3_ctrl & all_mask) != all_mask)
1897                         return 0;
1898         }
1899         return 1;
1900 }
1901
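/* Bring up or re-check the link on a copper PHY.  Roughly: clear MAC
 * status and PHY interrupts, apply per-chip PHY workarounds (possibly
 * resetting the PHY), poll BMSR for link, decode speed/duplex from the
 * PHY aux status register, resolve flow control on full-duplex autoneg
 * links, reprogram MAC_MODE accordingly, and report any carrier change
 * via tg3_link_report().
 */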
1902 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1903 {
1904         int current_link_up;
1905         u32 bmsr, dummy;
1906         u16 current_speed;
1907         u8 current_duplex;
1908         int i, err;
1909
1910         tw32(MAC_EVENT, 0);
1911
1912         tw32_f(MAC_STATUS,
1913              (MAC_STATUS_SYNC_CHANGED |
1914               MAC_STATUS_CFG_CHANGED |
1915               MAC_STATUS_MI_COMPLETION |
1916               MAC_STATUS_LNKSTATE_CHANGED));
1917         udelay(40);
1918
1919         tp->mi_mode = MAC_MI_MODE_BASE;
1920         tw32_f(MAC_MI_MODE, tp->mi_mode);
1921         udelay(80);
1922
1923         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1924
1925         /* Some third-party PHYs need to be reset on link going
1926          * down.
1927          */
1928         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1929              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1930              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1931             netif_carrier_ok(tp->dev)) {
1932                 tg3_readphy(tp, MII_BMSR, &bmsr);
1933                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1934                     !(bmsr & BMSR_LSTATUS))
1935                         force_reset = 1;
1936         }
1937         if (force_reset)
1938                 tg3_phy_reset(tp);
1939
1940         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1941                 tg3_readphy(tp, MII_BMSR, &bmsr);
1942                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1943                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1944                         bmsr = 0;
1945
1946                 if (!(bmsr & BMSR_LSTATUS)) {
1947                         err = tg3_init_5401phy_dsp(tp);
1948                         if (err)
1949                                 return err;
1950
1951                         tg3_readphy(tp, MII_BMSR, &bmsr);
1952                         for (i = 0; i < 1000; i++) {
1953                                 udelay(10);
1954                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1955                                     (bmsr & BMSR_LSTATUS)) {
1956                                         udelay(40);
1957                                         break;
1958                                 }
1959                         }
1960
1961                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1962                             !(bmsr & BMSR_LSTATUS) &&
1963                             tp->link_config.active_speed == SPEED_1000) {
1964                                 err = tg3_phy_reset(tp);
1965                                 if (!err)
1966                                         err = tg3_init_5401phy_dsp(tp);
1967                                 if (err)
1968                                         return err;
1969                         }
1970                 }
1971         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1972                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1973                 /* 5701 {A0,B0} CRC bug workaround */
1974                 tg3_writephy(tp, 0x15, 0x0a75);
1975                 tg3_writephy(tp, 0x1c, 0x8c68);
1976                 tg3_writephy(tp, 0x1c, 0x8d68);
1977                 tg3_writephy(tp, 0x1c, 0x8c68);
1978         }
1979
1980         /* Clear pending interrupts... */
1981         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1982         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1983
1984         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1985                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1986         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1987                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1988
1989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1991                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1992                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1993                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1994                 else
1995                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1996         }
1997
1998         current_link_up = 0;
1999         current_speed = SPEED_INVALID;
2000         current_duplex = DUPLEX_INVALID;
2001
2002         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2003                 u32 val;
2004
2005                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2006                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2007                 if (!(val & (1 << 10))) {
2008                         val |= (1 << 10);
2009                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2010                         goto relink;
2011                 }
2012         }
2013
2014         bmsr = 0;
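        /* BMSR latches link-down events, so read it twice to get the
         * current link state, and poll briefly for the link to come up.
         */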
2015         for (i = 0; i < 100; i++) {
2016                 tg3_readphy(tp, MII_BMSR, &bmsr);
2017                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2018                     (bmsr & BMSR_LSTATUS))
2019                         break;
2020                 udelay(40);
2021         }
2022
2023         if (bmsr & BMSR_LSTATUS) {
2024                 u32 aux_stat, bmcr;
2025
2026                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2027                 for (i = 0; i < 2000; i++) {
2028                         udelay(10);
2029                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2030                             aux_stat)
2031                                 break;
2032                 }
2033
2034                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2035                                              &current_speed,
2036                                              &current_duplex);
2037
2038                 bmcr = 0;
2039                 for (i = 0; i < 200; i++) {
2040                         tg3_readphy(tp, MII_BMCR, &bmcr);
2041                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2042                                 continue;
2043                         if (bmcr && bmcr != 0x7fff)
2044                                 break;
2045                         udelay(10);
2046                 }
2047
2048                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2049                         if (bmcr & BMCR_ANENABLE) {
2050                                 current_link_up = 1;
2051
2052                                 /* Force autoneg restart if we are exiting
2053                                  * low power mode.
2054                                  */
2055                                 if (!tg3_copper_is_advertising_all(tp,
2056                                                 tp->link_config.advertising))
2057                                         current_link_up = 0;
2058                         } else {
2059                                 current_link_up = 0;
2060                         }
2061                 } else {
2062                         if (!(bmcr & BMCR_ANENABLE) &&
2063                             tp->link_config.speed == current_speed &&
2064                             tp->link_config.duplex == current_duplex) {
2065                                 current_link_up = 1;
2066                         } else {
2067                                 current_link_up = 0;
2068                         }
2069                 }
2070
2071                 tp->link_config.active_speed = current_speed;
2072                 tp->link_config.active_duplex = current_duplex;
2073         }
2074
2075         if (current_link_up == 1 &&
2076             (tp->link_config.active_duplex == DUPLEX_FULL) &&
2077             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2078                 u32 local_adv, remote_adv;
2079
2080                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2081                         local_adv = 0;
2082                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2083
2084                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2085                         remote_adv = 0;
2086
2087                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2088
2089                 /* If we are not advertising full pause capability,
2090                  * something is wrong.  Bring the link down and reconfigure.
2091                  */
2092                 if (local_adv != ADVERTISE_PAUSE_CAP) {
2093                         current_link_up = 0;
2094                 } else {
2095                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2096                 }
2097         }
2098 relink:
2099         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2100                 u32 tmp;
2101
2102                 tg3_phy_copper_begin(tp);
2103
2104                 tg3_readphy(tp, MII_BMSR, &tmp);
2105                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2106                     (tmp & BMSR_LSTATUS))
2107                         current_link_up = 1;
2108         }
2109
2110         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2111         if (current_link_up == 1) {
2112                 if (tp->link_config.active_speed == SPEED_100 ||
2113                     tp->link_config.active_speed == SPEED_10)
2114                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2115                 else
2116                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2117         } else
2118                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2119
2120         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2121         if (tp->link_config.active_duplex == DUPLEX_HALF)
2122                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2123
2124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2125                 if (current_link_up == 1 &&
2126                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2127                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2128                 else
2129                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2130         }
2131
2132         /* ??? Without this setting Netgear GA302T PHY does not
2133          * ??? send/receive packets...
2134          */
2135         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2136             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2137                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2138                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2139                 udelay(80);
2140         }
2141
2142         tw32_f(MAC_MODE, tp->mac_mode);
2143         udelay(40);
2144
2145         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2146                 /* Polled via timer. */
2147                 tw32_f(MAC_EVENT, 0);
2148         } else {
2149                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2150         }
2151         udelay(40);
2152
2153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2154             current_link_up == 1 &&
2155             tp->link_config.active_speed == SPEED_1000 &&
2156             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2157              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2158                 udelay(120);
2159                 tw32_f(MAC_STATUS,
2160                      (MAC_STATUS_SYNC_CHANGED |
2161                       MAC_STATUS_CFG_CHANGED));
2162                 udelay(40);
2163                 tg3_write_mem(tp,
2164                               NIC_SRAM_FIRMWARE_MBOX,
2165                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2166         }
2167
2168         if (current_link_up != netif_carrier_ok(tp->dev)) {
2169                 if (current_link_up)
2170                         netif_carrier_on(tp->dev);
2171                 else
2172                         netif_carrier_off(tp->dev);
2173                 tg3_link_report(tp);
2174         }
2175
2176         return 0;
2177 }
2178
2179 struct tg3_fiber_aneginfo {
2180         int state;
2181 #define ANEG_STATE_UNKNOWN              0
2182 #define ANEG_STATE_AN_ENABLE            1
2183 #define ANEG_STATE_RESTART_INIT         2
2184 #define ANEG_STATE_RESTART              3
2185 #define ANEG_STATE_DISABLE_LINK_OK      4
2186 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2187 #define ANEG_STATE_ABILITY_DETECT       6
2188 #define ANEG_STATE_ACK_DETECT_INIT      7
2189 #define ANEG_STATE_ACK_DETECT           8
2190 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2191 #define ANEG_STATE_COMPLETE_ACK         10
2192 #define ANEG_STATE_IDLE_DETECT_INIT     11
2193 #define ANEG_STATE_IDLE_DETECT          12
2194 #define ANEG_STATE_LINK_OK              13
2195 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2196 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2197
2198         u32 flags;
2199 #define MR_AN_ENABLE            0x00000001
2200 #define MR_RESTART_AN           0x00000002
2201 #define MR_AN_COMPLETE          0x00000004
2202 #define MR_PAGE_RX              0x00000008
2203 #define MR_NP_LOADED            0x00000010
2204 #define MR_TOGGLE_TX            0x00000020
2205 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2206 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2207 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2208 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2209 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2210 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2211 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2212 #define MR_TOGGLE_RX            0x00002000
2213 #define MR_NP_RX                0x00004000
2214
2215 #define MR_LINK_OK              0x80000000
2216
2217         unsigned long link_time, cur_time;
2218
2219         u32 ability_match_cfg;
2220         int ability_match_count;
2221
2222         char ability_match, idle_match, ack_match;
2223
2224         u32 txconfig, rxconfig;
2225 #define ANEG_CFG_NP             0x00000080
2226 #define ANEG_CFG_ACK            0x00000040
2227 #define ANEG_CFG_RF2            0x00000020
2228 #define ANEG_CFG_RF1            0x00000010
2229 #define ANEG_CFG_PS2            0x00000001
2230 #define ANEG_CFG_PS1            0x00008000
2231 #define ANEG_CFG_HD             0x00004000
2232 #define ANEG_CFG_FD             0x00002000
2233 #define ANEG_CFG_INVAL          0x00001f06
2234
2235 };
2236 #define ANEG_OK         0
2237 #define ANEG_DONE       1
2238 #define ANEG_TIMER_ENAB 2
2239 #define ANEG_FAILED     -1
2240
2241 #define ANEG_STATE_SETTLE_TIME  10000
2242
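/* Software 1000BASE-X autoneg state machine, run once per tick from
 * fiber_autoneg().  The normal progression is AN_ENABLE ->
 * RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT -> ABILITY_DETECT ->
 * ACK_DETECT_INIT -> ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK; a mismatch or loss of the
 * received config words drops it back to AN_ENABLE.  Returns ANEG_OK,
 * ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */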
2243 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2244                                    struct tg3_fiber_aneginfo *ap)
2245 {
2246         unsigned long delta;
2247         u32 rx_cfg_reg;
2248         int ret;
2249
2250         if (ap->state == ANEG_STATE_UNKNOWN) {
2251                 ap->rxconfig = 0;
2252                 ap->link_time = 0;
2253                 ap->cur_time = 0;
2254                 ap->ability_match_cfg = 0;
2255                 ap->ability_match_count = 0;
2256                 ap->ability_match = 0;
2257                 ap->idle_match = 0;
2258                 ap->ack_match = 0;
2259         }
2260         ap->cur_time++;
2261
2262         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2263                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2264
2265                 if (rx_cfg_reg != ap->ability_match_cfg) {
2266                         ap->ability_match_cfg = rx_cfg_reg;
2267                         ap->ability_match = 0;
2268                         ap->ability_match_count = 0;
2269                 } else {
2270                         if (++ap->ability_match_count > 1) {
2271                                 ap->ability_match = 1;
2272                                 ap->ability_match_cfg = rx_cfg_reg;
2273                         }
2274                 }
2275                 if (rx_cfg_reg & ANEG_CFG_ACK)
2276                         ap->ack_match = 1;
2277                 else
2278                         ap->ack_match = 0;
2279
2280                 ap->idle_match = 0;
2281         } else {
2282                 ap->idle_match = 1;
2283                 ap->ability_match_cfg = 0;
2284                 ap->ability_match_count = 0;
2285                 ap->ability_match = 0;
2286                 ap->ack_match = 0;
2287
2288                 rx_cfg_reg = 0;
2289         }
2290
2291         ap->rxconfig = rx_cfg_reg;
2292         ret = ANEG_OK;
2293
2294         switch (ap->state) {
2295         case ANEG_STATE_UNKNOWN:
2296                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2297                         ap->state = ANEG_STATE_AN_ENABLE;
2298
2299                 /* fallthru */
2300         case ANEG_STATE_AN_ENABLE:
2301                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2302                 if (ap->flags & MR_AN_ENABLE) {
2303                         ap->link_time = 0;
2304                         ap->cur_time = 0;
2305                         ap->ability_match_cfg = 0;
2306                         ap->ability_match_count = 0;
2307                         ap->ability_match = 0;
2308                         ap->idle_match = 0;
2309                         ap->ack_match = 0;
2310
2311                         ap->state = ANEG_STATE_RESTART_INIT;
2312                 } else {
2313                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2314                 }
2315                 break;
2316
2317         case ANEG_STATE_RESTART_INIT:
2318                 ap->link_time = ap->cur_time;
2319                 ap->flags &= ~(MR_NP_LOADED);
2320                 ap->txconfig = 0;
2321                 tw32(MAC_TX_AUTO_NEG, 0);
2322                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2323                 tw32_f(MAC_MODE, tp->mac_mode);
2324                 udelay(40);
2325
2326                 ret = ANEG_TIMER_ENAB;
2327                 ap->state = ANEG_STATE_RESTART;
2328
2329                 /* fallthru */
2330         case ANEG_STATE_RESTART:
2331                 delta = ap->cur_time - ap->link_time;
2332                 if (delta > ANEG_STATE_SETTLE_TIME) {
2333                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2334                 } else {
2335                         ret = ANEG_TIMER_ENAB;
2336                 }
2337                 break;
2338
2339         case ANEG_STATE_DISABLE_LINK_OK:
2340                 ret = ANEG_DONE;
2341                 break;
2342
2343         case ANEG_STATE_ABILITY_DETECT_INIT:
2344                 ap->flags &= ~(MR_TOGGLE_TX);
2345                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2346                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2347                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348                 tw32_f(MAC_MODE, tp->mac_mode);
2349                 udelay(40);
2350
2351                 ap->state = ANEG_STATE_ABILITY_DETECT;
2352                 break;
2353
2354         case ANEG_STATE_ABILITY_DETECT:
2355                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2356                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2357                 }
2358                 break;
2359
2360         case ANEG_STATE_ACK_DETECT_INIT:
2361                 ap->txconfig |= ANEG_CFG_ACK;
2362                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2363                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2364                 tw32_f(MAC_MODE, tp->mac_mode);
2365                 udelay(40);
2366
2367                 ap->state = ANEG_STATE_ACK_DETECT;
2368
2369                 /* fallthru */
2370         case ANEG_STATE_ACK_DETECT:
2371                 if (ap->ack_match != 0) {
2372                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2373                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2374                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2375                         } else {
2376                                 ap->state = ANEG_STATE_AN_ENABLE;
2377                         }
2378                 } else if (ap->ability_match != 0 &&
2379                            ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                 }
2382                 break;
2383
2384         case ANEG_STATE_COMPLETE_ACK_INIT:
2385                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2386                         ret = ANEG_FAILED;
2387                         break;
2388                 }
2389                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2390                                MR_LP_ADV_HALF_DUPLEX |
2391                                MR_LP_ADV_SYM_PAUSE |
2392                                MR_LP_ADV_ASYM_PAUSE |
2393                                MR_LP_ADV_REMOTE_FAULT1 |
2394                                MR_LP_ADV_REMOTE_FAULT2 |
2395                                MR_LP_ADV_NEXT_PAGE |
2396                                MR_TOGGLE_RX |
2397                                MR_NP_RX);
2398                 if (ap->rxconfig & ANEG_CFG_FD)
2399                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2400                 if (ap->rxconfig & ANEG_CFG_HD)
2401                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2402                 if (ap->rxconfig & ANEG_CFG_PS1)
2403                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2404                 if (ap->rxconfig & ANEG_CFG_PS2)
2405                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2406                 if (ap->rxconfig & ANEG_CFG_RF1)
2407                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2408                 if (ap->rxconfig & ANEG_CFG_RF2)
2409                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2410                 if (ap->rxconfig & ANEG_CFG_NP)
2411                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2412
2413                 ap->link_time = ap->cur_time;
2414
2415                 ap->flags ^= (MR_TOGGLE_TX);
2416                 if (ap->rxconfig & 0x0008)
2417                         ap->flags |= MR_TOGGLE_RX;
2418                 if (ap->rxconfig & ANEG_CFG_NP)
2419                         ap->flags |= MR_NP_RX;
2420                 ap->flags |= MR_PAGE_RX;
2421
2422                 ap->state = ANEG_STATE_COMPLETE_ACK;
2423                 ret = ANEG_TIMER_ENAB;
2424                 break;
2425
2426         case ANEG_STATE_COMPLETE_ACK:
2427                 if (ap->ability_match != 0 &&
2428                     ap->rxconfig == 0) {
2429                         ap->state = ANEG_STATE_AN_ENABLE;
2430                         break;
2431                 }
2432                 delta = ap->cur_time - ap->link_time;
2433                 if (delta > ANEG_STATE_SETTLE_TIME) {
2434                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2435                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2436                         } else {
2437                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2438                                     !(ap->flags & MR_NP_RX)) {
2439                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2440                                 } else {
2441                                         ret = ANEG_FAILED;
2442                                 }
2443                         }
2444                 }
2445                 break;
2446
2447         case ANEG_STATE_IDLE_DETECT_INIT:
2448                 ap->link_time = ap->cur_time;
2449                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2450                 tw32_f(MAC_MODE, tp->mac_mode);
2451                 udelay(40);
2452
2453                 ap->state = ANEG_STATE_IDLE_DETECT;
2454                 ret = ANEG_TIMER_ENAB;
2455                 break;
2456
2457         case ANEG_STATE_IDLE_DETECT:
2458                 if (ap->ability_match != 0 &&
2459                     ap->rxconfig == 0) {
2460                         ap->state = ANEG_STATE_AN_ENABLE;
2461                         break;
2462                 }
2463                 delta = ap->cur_time - ap->link_time;
2464                 if (delta > ANEG_STATE_SETTLE_TIME) {
2465                         /* XXX another gem from the Broadcom driver :( */
2466                         ap->state = ANEG_STATE_LINK_OK;
2467                 }
2468                 break;
2469
2470         case ANEG_STATE_LINK_OK:
2471                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2472                 ret = ANEG_DONE;
2473                 break;
2474
2475         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2476                 /* ??? unimplemented */
2477                 break;
2478
2479         case ANEG_STATE_NEXT_PAGE_WAIT:
2480                 /* ??? unimplemented */
2481                 break;
2482
2483         default:
2484                 ret = ANEG_FAILED;
2485                 break;
2486         }
2487
2488         return ret;
2489 }
2490
2491 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2492 {
2493         int res = 0;
2494         struct tg3_fiber_aneginfo aninfo;
2495         int status = ANEG_FAILED;
2496         unsigned int tick;
2497         u32 tmp;
2498
2499         tw32_f(MAC_TX_AUTO_NEG, 0);
2500
2501         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2502         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2503         udelay(40);
2504
2505         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2506         udelay(40);
2507
2508         memset(&aninfo, 0, sizeof(aninfo));
2509         aninfo.flags |= MR_AN_ENABLE;
2510         aninfo.state = ANEG_STATE_UNKNOWN;
2511         aninfo.cur_time = 0;
2512         tick = 0;
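        /* Run the state machine in ~1 usec steps; give it roughly
         * 195 msec to reach ANEG_DONE or ANEG_FAILED before giving up.
         */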
2513         while (++tick < 195000) {
2514                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2515                 if (status == ANEG_DONE || status == ANEG_FAILED)
2516                         break;
2517
2518                 udelay(1);
2519         }
2520
2521         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2522         tw32_f(MAC_MODE, tp->mac_mode);
2523         udelay(40);
2524
2525         *flags = aninfo.flags;
2526
2527         if (status == ANEG_DONE &&
2528             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2529                              MR_LP_ADV_FULL_DUPLEX)))
2530                 res = 1;
2531
2532         return res;
2533 }
2534
2535 static void tg3_init_bcm8002(struct tg3 *tp)
2536 {
2537         u32 mac_status = tr32(MAC_STATUS);
2538         int i;
2539
2540         /* Reset on first-time init or when we have a link. */
2541         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2542             !(mac_status & MAC_STATUS_PCS_SYNCED))
2543                 return;
2544
2545         /* Set PLL lock range. */
2546         tg3_writephy(tp, 0x16, 0x8007);
2547
2548         /* SW reset */
2549         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2550
2551         /* Wait for reset to complete. */
2552         /* XXX schedule_timeout() ... */
2553         for (i = 0; i < 500; i++)
2554                 udelay(10);
2555
2556         /* Config mode; select PMA/Ch 1 regs. */
2557         tg3_writephy(tp, 0x10, 0x8411);
2558
2559         /* Enable auto-lock and comdet, select txclk for tx. */
2560         tg3_writephy(tp, 0x11, 0x0a10);
2561
2562         tg3_writephy(tp, 0x18, 0x00a0);
2563         tg3_writephy(tp, 0x16, 0x41ff);
2564
2565         /* Assert and deassert POR. */
2566         tg3_writephy(tp, 0x13, 0x0400);
2567         udelay(40);
2568         tg3_writephy(tp, 0x13, 0x0000);
2569
2570         tg3_writephy(tp, 0x11, 0x0a50);
2571         udelay(40);
2572         tg3_writephy(tp, 0x11, 0x0a10);
2573
2574         /* Wait for signal to stabilize */
2575         /* XXX schedule_timeout() ... */
2576         for (i = 0; i < 15000; i++)
2577                 udelay(10);
2578
2579         /* Deselect the channel register so we can read the PHYID
2580          * later.
2581          */
2582         tg3_writephy(tp, 0x10, 0x8011);
2583 }
2584
2585 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2586 {
2587         u32 sg_dig_ctrl, sg_dig_status;
2588         u32 serdes_cfg, expected_sg_dig_ctrl;
2589         int workaround, port_a;
2590         int current_link_up;
2591
2592         serdes_cfg = 0;
2593         expected_sg_dig_ctrl = 0;
2594         workaround = 0;
2595         port_a = 1;
2596         current_link_up = 0;
2597
2598         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2599             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2600                 workaround = 1;
2601                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2602                         port_a = 0;
2603
2604                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2605                 /* preserve bits 20-23 for voltage regulator */
2606                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2607         }
2608
2609         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2610
2611         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2612                 if (sg_dig_ctrl & (1 << 31)) {
2613                         if (workaround) {
2614                                 u32 val = serdes_cfg;
2615
2616                                 if (port_a)
2617                                         val |= 0xc010000;
2618                                 else
2619                                         val |= 0x4010000;
2620                                 tw32_f(MAC_SERDES_CFG, val);
2621                         }
2622                         tw32_f(SG_DIG_CTRL, 0x01388400);
2623                 }
2624                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2625                         tg3_setup_flow_control(tp, 0, 0);
2626                         current_link_up = 1;
2627                 }
2628                 goto out;
2629         }
2630
2631         /* Want auto-negotiation.  */
2632         expected_sg_dig_ctrl = 0x81388400;
2633
2634         /* Pause capability */
2635         expected_sg_dig_ctrl |= (1 << 11);
2636
2637         /* Asymmetric pause */
2638         expected_sg_dig_ctrl |= (1 << 12);
2639
2640         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2641                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2642                     tp->serdes_counter &&
2643                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2644                                     MAC_STATUS_RCVD_CFG)) ==
2645                      MAC_STATUS_PCS_SYNCED)) {
2646                         tp->serdes_counter--;
2647                         current_link_up = 1;
2648                         goto out;
2649                 }
2650 restart_autoneg:
2651                 if (workaround)
2652                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2653                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2654                 udelay(5);
2655                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2656
2657                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2658                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2659         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2660                                  MAC_STATUS_SIGNAL_DET)) {
2661                 sg_dig_status = tr32(SG_DIG_STATUS);
2662                 mac_status = tr32(MAC_STATUS);
2663
2664                 if ((sg_dig_status & (1 << 1)) &&
2665                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2666                         u32 local_adv, remote_adv;
2667
2668                         local_adv = ADVERTISE_PAUSE_CAP;
2669                         remote_adv = 0;
2670                         if (sg_dig_status & (1 << 19))
2671                                 remote_adv |= LPA_PAUSE_CAP;
2672                         if (sg_dig_status & (1 << 20))
2673                                 remote_adv |= LPA_PAUSE_ASYM;
2674
2675                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2676                         current_link_up = 1;
2677                         tp->serdes_counter = 0;
2678                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2679                 } else if (!(sg_dig_status & (1 << 1))) {
2680                         if (tp->serdes_counter)
2681                                 tp->serdes_counter--;
2682                         else {
2683                                 if (workaround) {
2684                                         u32 val = serdes_cfg;
2685
2686                                         if (port_a)
2687                                                 val |= 0xc010000;
2688                                         else
2689                                                 val |= 0x4010000;
2690
2691                                         tw32_f(MAC_SERDES_CFG, val);
2692                                 }
2693
2694                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2695                                 udelay(40);
2696
2697                                 /* Link parallel detection - link is up
2698                                  * only if we have PCS_SYNC and not
2699                                  * receiving config code words.  */
2700                                 mac_status = tr32(MAC_STATUS);
2701                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2702                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2703                                         tg3_setup_flow_control(tp, 0, 0);
2704                                         current_link_up = 1;
2705                                         tp->tg3_flags2 |=
2706                                                 TG3_FLG2_PARALLEL_DETECT;
2707                                         tp->serdes_counter =
2708                                                 SERDES_PARALLEL_DET_TIMEOUT;
2709                                 } else
2710                                         goto restart_autoneg;
2711                         }
2712                 }
2713         } else {
2714                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2715                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2716         }
2717
2718 out:
2719         return current_link_up;
2720 }
2721
2722 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2723 {
2724         int current_link_up = 0;
2725
2726         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2727                 goto out;
2728
2729         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2730                 u32 flags;
2731                 int i;
2732
2733                 if (fiber_autoneg(tp, &flags)) {
2734                         u32 local_adv, remote_adv;
2735
2736                         local_adv = ADVERTISE_PAUSE_CAP;
2737                         remote_adv = 0;
2738                         if (flags & MR_LP_ADV_SYM_PAUSE)
2739                                 remote_adv |= LPA_PAUSE_CAP;
2740                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2741                                 remote_adv |= LPA_PAUSE_ASYM;
2742
2743                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2744
2745                         current_link_up = 1;
2746                 }
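                     /* Clear any latched SYNC/CFG change indications and
                      * wait for them to stay clear, giving up after 30
                      * tries.
                      */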
2747                 for (i = 0; i < 30; i++) {
2748                         udelay(20);
2749                         tw32_f(MAC_STATUS,
2750                                (MAC_STATUS_SYNC_CHANGED |
2751                                 MAC_STATUS_CFG_CHANGED));
2752                         udelay(40);
2753                         if ((tr32(MAC_STATUS) &
2754                              (MAC_STATUS_SYNC_CHANGED |
2755                               MAC_STATUS_CFG_CHANGED)) == 0)
2756                                 break;
2757                 }
2758
2759                 mac_status = tr32(MAC_STATUS);
2760                 if (current_link_up == 0 &&
2761                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2762                     !(mac_status & MAC_STATUS_RCVD_CFG))
2763                         current_link_up = 1;
2764         } else {
2765                 /* Forcing 1000FD link up. */
2766                 current_link_up = 1;
2767
2768                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2769                 udelay(40);
2770
2771                 tw32_f(MAC_MODE, tp->mac_mode);
2772                 udelay(40);
2773         }
2774
2775 out:
2776         return current_link_up;
2777 }
2778
2779 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2780 {
2781         u32 orig_pause_cfg;
2782         u16 orig_active_speed;
2783         u8 orig_active_duplex;
2784         u32 mac_status;
2785         int current_link_up;
2786         int i;
2787
2788         orig_pause_cfg =
2789                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2790                                   TG3_FLAG_TX_PAUSE));
2791         orig_active_speed = tp->link_config.active_speed;
2792         orig_active_duplex = tp->link_config.active_duplex;
2793
2794         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2795             netif_carrier_ok(tp->dev) &&
2796             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2797                 mac_status = tr32(MAC_STATUS);
2798                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2799                                MAC_STATUS_SIGNAL_DET |
2800                                MAC_STATUS_CFG_CHANGED |
2801                                MAC_STATUS_RCVD_CFG);
2802                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2803                                    MAC_STATUS_SIGNAL_DET)) {
2804                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2805                                             MAC_STATUS_CFG_CHANGED));
2806                         return 0;
2807                 }
2808         }
2809
2810         tw32_f(MAC_TX_AUTO_NEG, 0);
2811
2812         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2813         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2814         tw32_f(MAC_MODE, tp->mac_mode);
2815         udelay(40);
2816
2817         if (tp->phy_id == PHY_ID_BCM8002)
2818                 tg3_init_bcm8002(tp);
2819
2820         /* Enable link change event even when polling the serdes. */
2821         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2822         udelay(40);
2823
2824         current_link_up = 0;
2825         mac_status = tr32(MAC_STATUS);
2826
2827         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2828                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2829         else
2830                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2831
2832         tp->hw_status->status =
2833                 (SD_STATUS_UPDATED |
2834                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2835
2836         for (i = 0; i < 100; i++) {
2837                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2838                                     MAC_STATUS_CFG_CHANGED));
2839                 udelay(5);
2840                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2841                                          MAC_STATUS_CFG_CHANGED |
2842                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2843                         break;
2844         }
2845
2846         mac_status = tr32(MAC_STATUS);
2847         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2848                 current_link_up = 0;
2849                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2850                     tp->serdes_counter == 0) {
2851                         tw32_f(MAC_MODE, (tp->mac_mode |
2852                                           MAC_MODE_SEND_CONFIGS));
2853                         udelay(1);
2854                         tw32_f(MAC_MODE, tp->mac_mode);
2855                 }
2856         }
2857
2858         if (current_link_up == 1) {
2859                 tp->link_config.active_speed = SPEED_1000;
2860                 tp->link_config.active_duplex = DUPLEX_FULL;
2861                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2862                                     LED_CTRL_LNKLED_OVERRIDE |
2863                                     LED_CTRL_1000MBPS_ON));
2864         } else {
2865                 tp->link_config.active_speed = SPEED_INVALID;
2866                 tp->link_config.active_duplex = DUPLEX_INVALID;
2867                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2868                                     LED_CTRL_LNKLED_OVERRIDE |
2869                                     LED_CTRL_TRAFFIC_OVERRIDE));
2870         }
2871
2872         if (current_link_up != netif_carrier_ok(tp->dev)) {
2873                 if (current_link_up)
2874                         netif_carrier_on(tp->dev);
2875                 else
2876                         netif_carrier_off(tp->dev);
2877                 tg3_link_report(tp);
2878         } else {
2879                 u32 now_pause_cfg =
2880                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2881                                          TG3_FLAG_TX_PAUSE);
2882                 if (orig_pause_cfg != now_pause_cfg ||
2883                     orig_active_speed != tp->link_config.active_speed ||
2884                     orig_active_duplex != tp->link_config.active_duplex)
2885                         tg3_link_report(tp);
2886         }
2887
2888         return 0;
2889 }
2890
2891 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2892 {
2893         int current_link_up, err = 0;
2894         u32 bmsr, bmcr;
2895         u16 current_speed;
2896         u8 current_duplex;
2897
2898         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2899         tw32_f(MAC_MODE, tp->mac_mode);
2900         udelay(40);
2901
2902         tw32(MAC_EVENT, 0);
2903
2904         tw32_f(MAC_STATUS,
2905              (MAC_STATUS_SYNC_CHANGED |
2906               MAC_STATUS_CFG_CHANGED |
2907               MAC_STATUS_MI_COMPLETION |
2908               MAC_STATUS_LNKSTATE_CHANGED));
2909         udelay(40);
2910
2911         if (force_reset)
2912                 tg3_phy_reset(tp);
2913
2914         current_link_up = 0;
2915         current_speed = SPEED_INVALID;
2916         current_duplex = DUPLEX_INVALID;
2917
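             /* The BMSR link-status bit is latched, so read the register
              * twice to pick up the current state.
              */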
2918         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2919         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2920         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2921                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2922                         bmsr |= BMSR_LSTATUS;
2923                 else
2924                         bmsr &= ~BMSR_LSTATUS;
2925         }
2926
2927         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2928
2929         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2930             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2931                 /* do nothing, just check for link up at the end */
2932         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2933                 u32 adv, new_adv;
2934
2935                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2936                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2937                                   ADVERTISE_1000XPAUSE |
2938                                   ADVERTISE_1000XPSE_ASYM |
2939                                   ADVERTISE_SLCT);
2940
2941                 /* Always advertise symmetric PAUSE just like copper */
2942                 new_adv |= ADVERTISE_1000XPAUSE;
2943
2944                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2945                         new_adv |= ADVERTISE_1000XHALF;
2946                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2947                         new_adv |= ADVERTISE_1000XFULL;
2948
2949                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2950                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2952                         tg3_writephy(tp, MII_BMCR, bmcr);
2953
2954                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2955                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2956                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2957
2958                         return err;
2959                 }
2960         } else {
2961                 u32 new_bmcr;
2962
2963                 bmcr &= ~BMCR_SPEED1000;
2964                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2965
2966                 if (tp->link_config.duplex == DUPLEX_FULL)
2967                         new_bmcr |= BMCR_FULLDPLX;
2968
2969                 if (new_bmcr != bmcr) {
2970                         /* BMCR_SPEED1000 is a reserved bit that needs
2971                          * to be set on write.
2972                          */
2973                         new_bmcr |= BMCR_SPEED1000;
2974
2975                         /* Force a linkdown */
2976                         if (netif_carrier_ok(tp->dev)) {
2977                                 u32 adv;
2978
2979                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2980                                 adv &= ~(ADVERTISE_1000XFULL |
2981                                          ADVERTISE_1000XHALF |
2982                                          ADVERTISE_SLCT);
2983                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2984                                 tg3_writephy(tp, MII_BMCR, bmcr |
2985                                                            BMCR_ANRESTART |
2986                                                            BMCR_ANENABLE);
2987                                 udelay(10);
2988                                 netif_carrier_off(tp->dev);
2989                         }
2990                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2991                         bmcr = new_bmcr;
2992                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2993                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2994                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2995                             ASIC_REV_5714) {
2996                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2997                                         bmsr |= BMSR_LSTATUS;
2998                                 else
2999                                         bmsr &= ~BMSR_LSTATUS;
3000                         }
3001                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3002                 }
3003         }
3004
3005         if (bmsr & BMSR_LSTATUS) {
3006                 current_speed = SPEED_1000;
3007                 current_link_up = 1;
3008                 if (bmcr & BMCR_FULLDPLX)
3009                         current_duplex = DUPLEX_FULL;
3010                 else
3011                         current_duplex = DUPLEX_HALF;
3012
3013                 if (bmcr & BMCR_ANENABLE) {
3014                         u32 local_adv, remote_adv, common;
3015
3016                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3017                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3018                         common = local_adv & remote_adv;
3019                         if (common & (ADVERTISE_1000XHALF |
3020                                       ADVERTISE_1000XFULL)) {
3021                                 if (common & ADVERTISE_1000XFULL)
3022                                         current_duplex = DUPLEX_FULL;
3023                                 else
3024                                         current_duplex = DUPLEX_HALF;
3025
3026                                 tg3_setup_flow_control(tp, local_adv,
3027                                                        remote_adv);
3028                         } else {
3029                                 current_link_up = 0;
3030                         }
3031                 }
3032         }
3033
3034         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3035         if (tp->link_config.active_duplex == DUPLEX_HALF)
3036                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3037
3038         tw32_f(MAC_MODE, tp->mac_mode);
3039         udelay(40);
3040
3041         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3042
3043         tp->link_config.active_speed = current_speed;
3044         tp->link_config.active_duplex = current_duplex;
3045
3046         if (current_link_up != netif_carrier_ok(tp->dev)) {
3047                 if (current_link_up) {
3048                         netif_carrier_on(tp->dev);
3049                 } else {
3050                         netif_carrier_off(tp->dev);
3051                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3052                 }
3053                 tg3_link_report(tp);
3054         }
3055         return err;
3056 }
3057
3058 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3059 {
3060         if (tp->serdes_counter) {
3061                 /* Give autoneg time to complete. */
3062                 tp->serdes_counter--;
3063                 return;
3064         }
3065         if (!netif_carrier_ok(tp->dev) &&
3066             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3067                 u32 bmcr;
3068
3069                 tg3_readphy(tp, MII_BMCR, &bmcr);
3070                 if (bmcr & BMCR_ANENABLE) {
3071                         u32 phy1, phy2;
3072
3073                         /* Select shadow register 0x1f */
3074                         tg3_writephy(tp, 0x1c, 0x7c00);
3075                         tg3_readphy(tp, 0x1c, &phy1);
3076
3077                         /* Select expansion interrupt status register */
3078                         tg3_writephy(tp, 0x17, 0x0f01);
3079                         tg3_readphy(tp, 0x15, &phy2);
3080                         tg3_readphy(tp, 0x15, &phy2);
3081
3082                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3083                                 /* We have signal detect and not receiving
3084                                  * config code words, link is up by parallel
3085                                  * detection.
3086                                  */
3087
3088                                 bmcr &= ~BMCR_ANENABLE;
3089                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3090                                 tg3_writephy(tp, MII_BMCR, bmcr);
3091                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3092                         }
3093                 }
3094         } else if (netif_carrier_ok(tp->dev) &&
3095                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3096                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3098                 u32 phy2;
3099
3100                 /* Select expansion interrupt status register */
3101                 tg3_writephy(tp, 0x17, 0x0f01);
3102                 tg3_readphy(tp, 0x15, &phy2);
3103                 if (phy2 & 0x20) {
3104                         u32 bmcr;
3105
3106                         /* Config code words received, turn on autoneg. */
3107                         tg3_readphy(tp, MII_BMCR, &bmcr);
3108                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3109
3110                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3111
3112                 }
3113         }
3114 }
3115
3116 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3117 {
3118         int err;
3119
3120         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3121                 err = tg3_setup_fiber_phy(tp, force_reset);
3122         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3123                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3124         } else {
3125                 err = tg3_setup_copper_phy(tp, force_reset);
3126         }
3127
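             /* Half-duplex gigabit needs an extended slot time: the 0xff
              * value below appears to select the ~4096 bit-time slot, vs.
              * the standard 512 bit-time slot (32) used otherwise.
              */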
3128         if (tp->link_config.active_speed == SPEED_1000 &&
3129             tp->link_config.active_duplex == DUPLEX_HALF)
3130                 tw32(MAC_TX_LENGTHS,
3131                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3132                       (6 << TX_LENGTHS_IPG_SHIFT) |
3133                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3134         else
3135                 tw32(MAC_TX_LENGTHS,
3136                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3137                       (6 << TX_LENGTHS_IPG_SHIFT) |
3138                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3139
3140         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3141                 if (netif_carrier_ok(tp->dev)) {
3142                         tw32(HOSTCC_STAT_COAL_TICKS,
3143                              tp->coal.stats_block_coalesce_usecs);
3144                 } else {
3145                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3146                 }
3147         }
3148
3149         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3150                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3151                 if (!netif_carrier_ok(tp->dev))
3152                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3153                               tp->pwrmgmt_thresh;
3154                 else
3155                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3156                 tw32(PCIE_PWR_MGMT_THRESH, val);
3157         }
3158
3159         return err;
3160 }
3161
3162 /* This is called whenever we suspect that the system chipset is re-
3163  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3164  * is bogus tx completions. We try to recover by setting the
3165  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3166  * in the workqueue.
3167  */
3168 static void tg3_tx_recover(struct tg3 *tp)
3169 {
3170         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3171                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3172
3173         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3174                "mapped I/O cycles to the network device, attempting to "
3175                "recover. Please report the problem to the driver maintainer "
3176                "and include system chipset information.\n", tp->dev->name);
3177
3178         spin_lock(&tp->lock);
3179         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3180         spin_unlock(&tp->lock);
3181 }
3182
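     /* Free TX descriptors: the configured tx_pending budget minus the
      * in-flight count, where (prod - cons) is taken modulo the
      * power-of-two ring size.  For example, with a 512-entry ring,
      * prod = 5 and cons = 510 leaves (5 - 510) & 511 = 7 in flight.
      * The smp_mb() pairs with the barrier in tg3_tx() so that a
      * stopped queue sees the latest tx_cons.
      */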
3183 static inline u32 tg3_tx_avail(struct tg3 *tp)
3184 {
3185         smp_mb();
3186         return (tp->tx_pending -
3187                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3188 }
3189
3190 /* Tigon3 never reports partial packet sends.  So we do not
3191  * need special logic to handle SKBs that have not had all
3192  * of their frags sent yet, like SunGEM does.
3193  */
3194 static void tg3_tx(struct tg3 *tp)
3195 {
3196         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3197         u32 sw_idx = tp->tx_cons;
3198
3199         while (sw_idx != hw_idx) {
3200                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3201                 struct sk_buff *skb = ri->skb;
3202                 int i, tx_bug = 0;
3203
3204                 if (unlikely(skb == NULL)) {
3205                         tg3_tx_recover(tp);
3206                         return;
3207                 }
3208
3209                 pci_unmap_single(tp->pdev,
3210                                  pci_unmap_addr(ri, mapping),
3211                                  skb_headlen(skb),
3212                                  PCI_DMA_TODEVICE);
3213
3214                 ri->skb = NULL;
3215
3216                 sw_idx = NEXT_TX(sw_idx);
3217
3218                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3219                         ri = &tp->tx_buffers[sw_idx];
3220                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3221                                 tx_bug = 1;
3222
3223                         pci_unmap_page(tp->pdev,
3224                                        pci_unmap_addr(ri, mapping),
3225                                        skb_shinfo(skb)->frags[i].size,
3226                                        PCI_DMA_TODEVICE);
3227
3228                         sw_idx = NEXT_TX(sw_idx);
3229                 }
3230
3231                 dev_kfree_skb(skb);
3232
3233                 if (unlikely(tx_bug)) {
3234                         tg3_tx_recover(tp);
3235                         return;
3236                 }
3237         }
3238
3239         tp->tx_cons = sw_idx;
3240
3241         /* Need to make the tx_cons update visible to tg3_start_xmit()
3242          * before checking for netif_queue_stopped().  Without the
3243          * memory barrier, there is a small possibility that tg3_start_xmit()
3244          * will miss it and cause the queue to be stopped forever.
3245          */
3246         smp_mb();
3247
3248         if (unlikely(netif_queue_stopped(tp->dev) &&
3249                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3250                 netif_tx_lock(tp->dev);
3251                 if (netif_queue_stopped(tp->dev) &&
3252                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3253                         netif_wake_queue(tp->dev);
3254                 netif_tx_unlock(tp->dev);
3255         }
3256 }
3257
3258 /* Returns size of skb allocated or < 0 on error.
3259  *
3260  * We only need to fill in the address because the other members
3261  * of the RX descriptor are invariant, see tg3_init_rings.
3262  *
3263  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3264  * posting buffers we only dirty the first cache line of the RX
3265  * descriptor (containing the address).  Whereas for the RX status
3266  * buffers the cpu only reads the last cacheline of the RX descriptor
3267  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3268  */
3269 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3270                             int src_idx, u32 dest_idx_unmasked)
3271 {
3272         struct tg3_rx_buffer_desc *desc;
3273         struct ring_info *map, *src_map;
3274         struct sk_buff *skb;
3275         dma_addr_t mapping;
3276         int skb_size, dest_idx;
3277
3278         src_map = NULL;
3279         switch (opaque_key) {
3280         case RXD_OPAQUE_RING_STD:
3281                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3282                 desc = &tp->rx_std[dest_idx];
3283                 map = &tp->rx_std_buffers[dest_idx];
3284                 if (src_idx >= 0)
3285                         src_map = &tp->rx_std_buffers[src_idx];
3286                 skb_size = tp->rx_pkt_buf_sz;
3287                 break;
3288
3289         case RXD_OPAQUE_RING_JUMBO:
3290                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3291                 desc = &tp->rx_jumbo[dest_idx];
3292                 map = &tp->rx_jumbo_buffers[dest_idx];
3293                 if (src_idx >= 0)
3294                         src_map = &tp->rx_jumbo_buffers[src_idx];
3295                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3296                 break;
3297
3298         default:
3299                 return -EINVAL;
3300         }
3301
3302         /* Do not overwrite any of the map or rp information
3303          * until we are sure we can commit to a new buffer.
3304          *
3305          * Callers depend upon this behavior and assume that
3306          * we leave everything unchanged if we fail.
3307          */
3308         skb = netdev_alloc_skb(tp->dev, skb_size);
3309         if (skb == NULL)
3310                 return -ENOMEM;
3311
3312         skb_reserve(skb, tp->rx_offset);
3313
3314         mapping = pci_map_single(tp->pdev, skb->data,
3315                                  skb_size - tp->rx_offset,
3316                                  PCI_DMA_FROMDEVICE);
3317
3318         map->skb = skb;
3319         pci_unmap_addr_set(map, mapping, mapping);
3320
3321         if (src_map != NULL)
3322                 src_map->skb = NULL;
3323
3324         desc->addr_hi = ((u64)mapping >> 32);
3325         desc->addr_lo = ((u64)mapping & 0xffffffff);
3326
3327         return skb_size;
3328 }
3329
3330 /* We only need to move over in the address because the other
3331  * members of the RX descriptor are invariant.  See notes above
3332  * tg3_alloc_rx_skb for full details.
3333  */
3334 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3335                            int src_idx, u32 dest_idx_unmasked)
3336 {
3337         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3338         struct ring_info *src_map, *dest_map;
3339         int dest_idx;
3340
3341         switch (opaque_key) {
3342         case RXD_OPAQUE_RING_STD:
3343                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3344                 dest_desc = &tp->rx_std[dest_idx];
3345                 dest_map = &tp->rx_std_buffers[dest_idx];
3346                 src_desc = &tp->rx_std[src_idx];
3347                 src_map = &tp->rx_std_buffers[src_idx];
3348                 break;
3349
3350         case RXD_OPAQUE_RING_JUMBO:
3351                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3352                 dest_desc = &tp->rx_jumbo[dest_idx];
3353                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3354                 src_desc = &tp->rx_jumbo[src_idx];
3355                 src_map = &tp->rx_jumbo_buffers[src_idx];
3356                 break;
3357
3358         default:
3359                 return;
3360         }
3361
3362         dest_map->skb = src_map->skb;
3363         pci_unmap_addr_set(dest_map, mapping,
3364                            pci_unmap_addr(src_map, mapping));
3365         dest_desc->addr_hi = src_desc->addr_hi;
3366         dest_desc->addr_lo = src_desc->addr_lo;
3367
3368         src_map->skb = NULL;
3369 }
3370
3371 #if TG3_VLAN_TAG_USED
3372 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3373 {
3374         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3375 }
3376 #endif
3377
3378 /* The RX ring scheme is composed of multiple rings which post fresh
3379  * buffers to the chip, and one special ring the chip uses to report
3380  * status back to the host.
3381  *
3382  * The special ring reports the status of received packets to the
3383  * host.  The chip does not write into the original descriptor the
3384  * RX buffer was obtained from.  The chip simply takes the original
3385  * descriptor as provided by the host, updates the status and length
3386  * field, then writes this into the next status ring entry.
3387  *
3388  * Each ring the host uses to post buffers to the chip is described
3389  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3390  * it is first placed into the on-chip ram.  When the packet's length
3391  * is known, it walks down the TG3_BDINFO entries to select the ring.
3392  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3393  * which is within the range of the new packet's length is chosen.
3394  *
3395  * The "separate ring for rx status" scheme may sound queer, but it makes
3396  * sense from a cache coherency perspective.  If only the host writes
3397  * to the buffer post rings, and only the chip writes to the rx status
3398  * rings, then cache lines never move beyond shared-modified state.
3399  * If both the host and chip were to write into the same ring, cache line
3400  * eviction could occur since both entities want it in an exclusive state.
3401  */
3402 static int tg3_rx(struct tg3 *tp, int budget)
3403 {
3404         u32 work_mask, rx_std_posted = 0;
3405         u32 sw_idx = tp->rx_rcb_ptr;
3406         u16 hw_idx;
3407         int received;
3408
3409         hw_idx = tp->hw_status->idx[0].rx_producer;
3410         /*
3411          * We need to order the read of hw_idx and the read of
3412          * the opaque cookie.
3413          */
3414         rmb();
3415         work_mask = 0;
3416         received = 0;
3417         while (sw_idx != hw_idx && budget > 0) {
3418                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3419                 unsigned int len;
3420                 struct sk_buff *skb;
3421                 dma_addr_t dma_addr;
3422                 u32 opaque_key, desc_idx, *post_ptr;
3423
3424                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3425                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3426                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3427                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3428                                                   mapping);
3429                         skb = tp->rx_std_buffers[desc_idx].skb;
3430                         post_ptr = &tp->rx_std_ptr;
3431                         rx_std_posted++;
3432                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3433                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3434                                                   mapping);
3435                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3436                         post_ptr = &tp->rx_jumbo_ptr;
3437                 } else {
3439                         goto next_pkt_nopost;
3440                 }
3441
3442                 work_mask |= opaque_key;
3443
3444                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3445                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3446                 drop_it:
3447                         tg3_recycle_rx(tp, opaque_key,
3448                                        desc_idx, *post_ptr);
3449                 drop_it_no_recycle:
3450                         /* Other statistics kept track of by card. */
3451                         tp->net_stats.rx_dropped++;
3452                         goto next_pkt;
3453                 }
3454
3455                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3456
3457                 /* rx_offset != 2 iff this is a 5701 card running
3458                  * in PCI-X mode [see tg3_get_invariants()].
3459                  */
3460                 if (len > RX_COPY_THRESHOLD &&
3461                     tp->rx_offset == 2) {
3462                         int skb_size;
3463
3464                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3465                                                     desc_idx, *post_ptr);
3466                         if (skb_size < 0)
3467                                 goto drop_it;
3468
3469                         pci_unmap_single(tp->pdev, dma_addr,
3470                                          skb_size - tp->rx_offset,
3471                                          PCI_DMA_FROMDEVICE);
3472
3473                         skb_put(skb, len);
3474                 } else {
3475                         struct sk_buff *copy_skb;
3476
3477                         tg3_recycle_rx(tp, opaque_key,
3478                                        desc_idx, *post_ptr);
3479
3480                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3481                         if (copy_skb == NULL)
3482                                 goto drop_it_no_recycle;
3483
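                             /* The 2-byte reserve puts the IP header on a
                              * 4-byte boundary after the 14-byte Ethernet
                              * header.
                              */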
3484                         skb_reserve(copy_skb, 2);
3485                         skb_put(copy_skb, len);
3486                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3487                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3488                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3489
3490                         /* We'll reuse the original ring buffer. */
3491                         skb = copy_skb;
3492                 }
3493
3494                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3495                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3496                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3497                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3498                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3499                 else
3500                         skb->ip_summed = CHECKSUM_NONE;
3501
3502                 skb->protocol = eth_type_trans(skb, tp->dev);
3503 #if TG3_VLAN_TAG_USED
3504                 if (tp->vlgrp != NULL &&
3505                     desc->type_flags & RXD_FLAG_VLAN) {
3506                         tg3_vlan_rx(tp, skb,
3507                                     desc->err_vlan & RXD_VLAN_MASK);
3508                 } else
3509 #endif
3510                         netif_receive_skb(skb);
3511
3512                 tp->dev->last_rx = jiffies;
3513                 received++;
3514                 budget--;
3515
3516 next_pkt:
3517                 (*post_ptr)++;
3518
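                     /* Post std ring buffers in chunks of at most
                      * rx_std_max_post; presumably the chip cannot absorb
                      * an unbounded number of newly posted buffers in a
                      * single mailbox update.
                      */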
3519                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3520                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3521
3522                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3523                                      TG3_64BIT_REG_LOW, idx);
3524                         work_mask &= ~RXD_OPAQUE_RING_STD;
3525                         rx_std_posted = 0;
3526                 }
3527 next_pkt_nopost:
3528                 sw_idx++;
3529                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3530
3531                 /* Refresh hw_idx to see if there is new work */
3532                 if (sw_idx == hw_idx) {
3533                         hw_idx = tp->hw_status->idx[0].rx_producer;
3534                         rmb();
3535                 }
3536         }
3537
3538         /* ACK the status ring. */
3539         tp->rx_rcb_ptr = sw_idx;
3540         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3541
3542         /* Refill RX ring(s). */
3543         if (work_mask & RXD_OPAQUE_RING_STD) {
3544                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3545                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3546                              sw_idx);
3547         }
3548         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3549                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3550                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3551                              sw_idx);
3552         }
3553         mmiowb();
3554
3555         return received;
3556 }
3557
3558 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3559 {
3560         struct tg3_hw_status *sblk = tp->hw_status;
3561
3562         /* handle link change and other phy events */
3563         if (!(tp->tg3_flags &
3564               (TG3_FLAG_USE_LINKCHG_REG |
3565                TG3_FLAG_POLL_SERDES))) {
3566                 if (sblk->status & SD_STATUS_LINK_CHG) {
3567                         sblk->status = SD_STATUS_UPDATED |
3568                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3569                         spin_lock(&tp->lock);
3570                         tg3_setup_phy(tp, 0);
3571                         spin_unlock(&tp->lock);
3572                 }
3573         }
3574
3575         /* run TX completion thread */
3576         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3577                 tg3_tx(tp);
3578                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3579                         return work_done;
3580         }
3581
3582         /* run RX thread, within the bounds set by NAPI.
3583          * All RX "locking" is done by ensuring outside
3584          * code synchronizes with tg3->napi.poll()
3585          */
3586         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3587                 work_done += tg3_rx(tp, budget - work_done);
3588
3589         return work_done;
3590 }
3591
3592 static int tg3_poll(struct napi_struct *napi, int budget)
3593 {
3594         struct tg3 *tp = container_of(napi, struct tg3, napi);
3595         int work_done = 0;
3596         struct tg3_hw_status *sblk = tp->hw_status;
3597
3598         while (1) {
3599                 work_done = tg3_poll_work(tp, work_done, budget);
3600
3601                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3602                         goto tx_recovery;
3603
3604                 if (unlikely(work_done >= budget))
3605                         break;
3606
3607                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3608                         /* tp->last_tag is used in tg3_restart_ints() below
3609                          * to tell the hw how much work has been processed,
3610                          * so we must read it before checking for more work.
3611                          */
3612                         tp->last_tag = sblk->status_tag;
3613                         rmb();
3614                 } else
3615                         sblk->status &= ~SD_STATUS_UPDATED;
3616
3617                 if (likely(!tg3_has_work(tp))) {
3618                         netif_rx_complete(tp->dev, napi);
3619                         tg3_restart_ints(tp);
3620                         break;
3621                 }
3622         }
3623
3624         return work_done;
3625
3626 tx_recovery:
3627         /* work_done is guaranteed to be less than budget. */
3628         netif_rx_complete(tp->dev, napi);
3629         schedule_work(&tp->reset_task);
3630         return work_done;
3631 }
3632
3633 static void tg3_irq_quiesce(struct tg3 *tp)
3634 {
3635         BUG_ON(tp->irq_sync);
3636
3637         tp->irq_sync = 1;
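             /* Make the irq_sync store visible before synchronizing with
              * any handler that may already be running.
              */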
3638         smp_mb();
3639
3640         synchronize_irq(tp->pdev->irq);
3641 }
3642
3643 static inline int tg3_irq_sync(struct tg3 *tp)
3644 {
3645         return tp->irq_sync;
3646 }
3647
3648 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3649  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3650  * with as well.  Most of the time, this is not necessary except when
3651  * shutting down the device.
3652  */
3653 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3654 {
3655         spin_lock_bh(&tp->lock);
3656         if (irq_sync)
3657                 tg3_irq_quiesce(tp);
3658 }
3659
3660 static inline void tg3_full_unlock(struct tg3 *tp)
3661 {
3662         spin_unlock_bh(&tp->lock);
3663 }
3664
3665 /* One-shot MSI handler - Chip automatically disables interrupt
3666  * after sending MSI so driver doesn't have to do it.
3667  */
3668 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3669 {
3670         struct net_device *dev = dev_id;
3671         struct tg3 *tp = netdev_priv(dev);
3672
3673         prefetch(tp->hw_status);
3674         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3675
3676         if (likely(!tg3_irq_sync(tp)))
3677                 netif_rx_schedule(dev, &tp->napi);
3678
3679         return IRQ_HANDLED;
3680 }
3681
3682 /* MSI ISR - No need to check for interrupt sharing and no need to
3683  * flush status block and interrupt mailbox. PCI ordering rules
3684  * guarantee that MSI will arrive after the status block.
3685  */
3686 static irqreturn_t tg3_msi(int irq, void *dev_id)
3687 {
3688         struct net_device *dev = dev_id;
3689         struct tg3 *tp = netdev_priv(dev);
3690
3691         prefetch(tp->hw_status);
3692         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3693         /*
3694          * Writing any value to intr-mbox-0 clears PCI INTA# and
3695          * chip-internal interrupt pending events.
3696          * Writing non-zero to intr-mbox-0 additionally tells the
3697          * NIC to stop sending us irqs, engaging "in-intr-handler"
3698          * event coalescing.
3699          */
3700         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3701         if (likely(!tg3_irq_sync(tp)))
3702                 netif_rx_schedule(dev, &tp->napi);
3703
3704         return IRQ_RETVAL(1);
3705 }
3706
3707 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3708 {
3709         struct net_device *dev = dev_id;
3710         struct tg3 *tp = netdev_priv(dev);
3711         struct tg3_hw_status *sblk = tp->hw_status;
3712         unsigned int handled = 1;
3713
3714         /* In INTx mode, it is possible for the interrupt to arrive at the
3715          * CPU before the status block written just prior to it is visible.
3716          * Reading the PCI State register will confirm whether the
3717          * interrupt is ours and will flush the status block.
3718          */
3719         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3720                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3721                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3722                         handled = 0;
3723                         goto out;
3724                 }
3725         }
3726
3727         /*
3728          * Writing any value to intr-mbox-0 clears PCI INTA# and
3729          * chip-internal interrupt pending events.
3730          * Writing non-zero to intr-mbox-0 additionally tells the
3731          * NIC to stop sending us irqs, engaging "in-intr-handler"
3732          * event coalescing.
3733          *
3734          * Flush the mailbox to de-assert the IRQ immediately to prevent
3735          * spurious interrupts.  The flush impacts performance but
3736          * excessive spurious interrupts can be worse in some cases.
3737          */
3738         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3739         if (tg3_irq_sync(tp))
3740                 goto out;
3741         sblk->status &= ~SD_STATUS_UPDATED;
3742         if (likely(tg3_has_work(tp))) {
3743                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3744                 netif_rx_schedule(dev, &tp->napi);
3745         } else {
3746                 /* No work, shared interrupt perhaps?  re-enable
3747                  * interrupts, and flush that PCI write
3748                  */
3749                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3750                                0x00000000);
3751         }
3752 out:
3753         return IRQ_RETVAL(handled);
3754 }
3755
3756 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3757 {
3758         struct net_device *dev = dev_id;
3759         struct tg3 *tp = netdev_priv(dev);
3760         struct tg3_hw_status *sblk = tp->hw_status;
3761         unsigned int handled = 1;
3762
3763         /* In INTx mode, it is possible for the interrupt to arrive at the
3764          * CPU before the status block written just prior to it is visible.
3765          * Reading the PCI State register will confirm whether the
3766          * interrupt is ours and will flush the status block.
3767          */
3768         if (unlikely(sblk->status_tag == tp->last_tag)) {
3769                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3770                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3771                         handled = 0;
3772                         goto out;
3773                 }
3774         }
3775
3776         /*
3777          * Writing any value to intr-mbox-0 clears PCI INTA# and
3778          * chip-internal interrupt pending events.
3779          * Writing non-zero to intr-mbox-0 additionally tells the
3780          * NIC to stop sending us irqs, engaging "in-intr-handler"
3781          * event coalescing.
3782          *
3783          * Flush the mailbox to de-assert the IRQ immediately to prevent
3784          * spurious interrupts.  The flush impacts performance but
3785          * excessive spurious interrupts can be worse in some cases.
3786          */
3787         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3788         if (tg3_irq_sync(tp))
3789                 goto out;
3790         if (netif_rx_schedule_prep(dev, &tp->napi)) {
3791                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3792                 /* Update last_tag to mark that this status has been
3793                  * seen. Because interrupt may be shared, we may be
3794                  * racing with tg3_poll(), so only update last_tag
3795                  * if tg3_poll() is not scheduled.
3796                  */
3797                 tp->last_tag = sblk->status_tag;
3798                 __netif_rx_schedule(dev, &tp->napi);
3799         }
3800 out:
3801         return IRQ_RETVAL(handled);
3802 }
3803
3804 /* ISR for interrupt test */
3805 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3806 {
3807         struct net_device *dev = dev_id;
3808         struct tg3 *tp = netdev_priv(dev);
3809         struct tg3_hw_status *sblk = tp->hw_status;
3810
3811         if ((sblk->status & SD_STATUS_UPDATED) ||
3812             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3813                 tg3_disable_ints(tp);
3814                 return IRQ_RETVAL(1);
3815         }
3816         return IRQ_RETVAL(0);
3817 }
3818
3819 static int tg3_init_hw(struct tg3 *, int);
3820 static int tg3_halt(struct tg3 *, int, int);
3821
3822 /* Restart hardware after configuration changes, self-test, etc.
3823  * Invoked with tp->lock held.
3824  */
3825 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3826 {
3827         int err;
3828
3829         err = tg3_init_hw(tp, reset_phy);
3830         if (err) {
3831                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3832                        "aborting.\n", tp->dev->name);
3833                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3834                 tg3_full_unlock(tp);
3835                 del_timer_sync(&tp->timer);
3836                 tp->irq_sync = 0;
3837                 napi_enable(&tp->napi);
3838                 dev_close(tp->dev);
3839                 tg3_full_lock(tp, 0);
3840         }
3841         return err;
3842 }
3843
3844 #ifdef CONFIG_NET_POLL_CONTROLLER
3845 static void tg3_poll_controller(struct net_device *dev)
3846 {
3847         struct tg3 *tp = netdev_priv(dev);
3848
3849         tg3_interrupt(tp->pdev->irq, dev);
3850 }
3851 #endif
3852
3853 static void tg3_reset_task(struct work_struct *work)
3854 {
3855         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3856         unsigned int restart_timer;
3857
3858         tg3_full_lock(tp, 0);
3859
3860         if (!netif_running(tp->dev)) {
3861                 tg3_full_unlock(tp);
3862                 return;
3863         }
3864
3865         tg3_full_unlock(tp);
3866
3867         tg3_netif_stop(tp);
3868
3869         tg3_full_lock(tp, 1);
3870
3871         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3872         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3873
3874         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3875                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3876                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3877                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3878                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3879         }
3880
3881         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3882         if (tg3_init_hw(tp, 1))
3883                 goto out;
3884
3885         tg3_netif_start(tp);
3886
3887         if (restart_timer)
3888                 mod_timer(&tp->timer, jiffies + 1);
3889
3890 out:
3891         tg3_full_unlock(tp);
3892 }
3893
3894 static void tg3_dump_short_state(struct tg3 *tp)
3895 {
3896         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3897                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3898         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3899                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3900 }
3901
3902 static void tg3_tx_timeout(struct net_device *dev)
3903 {
3904         struct tg3 *tp = netdev_priv(dev);
3905
3906         if (netif_msg_tx_err(tp)) {
3907                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3908                        dev->name);
3909                 tg3_dump_short_state(tp);
3910         }
3911
3912         schedule_work(&tp->reset_task);
3913 }
3914
3915 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3916 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3917 {
3918         u32 base = (u32) mapping & 0xffffffff;
3919
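             /* base + len + 8 wraps past zero only if the buffer crosses a
              * 4GB boundary; the base > 0xffffdcc0 test limits this to
              * buffers that start within ~9KB (a jumbo frame) of the
              * boundary.
              */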
3920         return ((base > 0xffffdcc0) &&
3921                 (base + len + 8 < base));
3922 }
3923
3924 /* Test for DMA addresses > 40-bit */
3925 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3926                                           int len)
3927 {
3928 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3929         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3930                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3931         return 0;
3932 #else
3933         return 0;
3934 #endif
3935 }
3936
3937 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3938
3939 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3940 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3941                                        u32 last_plus_one, u32 *start,
3942                                        u32 base_flags, u32 mss)
3943 {
3944         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3945         dma_addr_t new_addr = 0;
3946         u32 entry = *start;
3947         int i, ret = 0;
3948
3949         if (!new_skb) {
3950                 ret = -1;
3951         } else {
3952                 /* New SKB is guaranteed to be linear. */
3953                 entry = *start;
3954                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3955                                           PCI_DMA_TODEVICE);
3956                 /* Make sure new skb does not cross any 4G boundaries.
3957                  * Drop the packet if it does.
3958                  */
3959                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3960                         ret = -1;
3961                         dev_kfree_skb(new_skb);
3962                         new_skb = NULL;
3963                 } else {
3964                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3965                                     base_flags, 1 | (mss << 1));
3966                         *start = NEXT_TX(entry);
3967                 }
3968         }
3969
3970         /* Now clean up the sw ring entries. */
3971         i = 0;
3972         while (entry != last_plus_one) {
3973                 int len;
3974
3975                 if (i == 0)
3976                         len = skb_headlen(skb);
3977                 else
3978                         len = skb_shinfo(skb)->frags[i-1].size;
3979                 pci_unmap_single(tp->pdev,
3980                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3981                                  len, PCI_DMA_TODEVICE);
3982                 if (i == 0) {
3983                         tp->tx_buffers[entry].skb = new_skb;
3984                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3985                 } else {
3986                         tp->tx_buffers[entry].skb = NULL;
3987                 }
3988                 entry = NEXT_TX(entry);
3989                 i++;
3990         }
3991
3992         dev_kfree_skb(skb);
3993
3994         return ret;
3995 }
3996
3997 static void tg3_set_txd(struct tg3 *tp, int entry,
3998                         dma_addr_t mapping, int len, u32 flags,
3999                         u32 mss_and_is_end)
4000 {
4001         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4002         int is_end = (mss_and_is_end & 0x1);
4003         u32 mss = (mss_and_is_end >> 1);
4004         u32 vlan_tag = 0;
4005
4006         if (is_end)
4007                 flags |= TXD_FLAG_END;
4008         if (flags & TXD_FLAG_VLAN) {
4009                 vlan_tag = flags >> 16;
4010                 flags &= 0xffff;
4011         }
4012         vlan_tag |= (mss << TXD_MSS_SHIFT);
4013
4014         txd->addr_hi = ((u64) mapping >> 32);
4015         txd->addr_lo = ((u64) mapping & 0xffffffff);
4016         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4017         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4018 }
4019
4020 /* hard_start_xmit for devices that don't have any bugs and
4021  * support TG3_FLG2_HW_TSO_2 only.
4022  */
4023 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4024 {
4025         struct tg3 *tp = netdev_priv(dev);
4026         dma_addr_t mapping;
4027         u32 len, entry, base_flags, mss;
4028
4029         len = skb_headlen(skb);
4030
4031         /* We are running in BH disabled context with netif_tx_lock
4032          * and TX reclaim runs via tp->napi.poll inside of a software
4033          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4034          * no IRQ context deadlocks to worry about either.  Rejoice!
4035          */
4036         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4037                 if (!netif_queue_stopped(dev)) {
4038                         netif_stop_queue(dev);
4039
4040                         /* This is a hard error, log it. */
4041                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4042                                "queue awake!\n", dev->name);
4043                 }
4044                 return NETDEV_TX_BUSY;
4045         }
4046
4047         entry = tp->tx_prod;
4048         base_flags = 0;
4049         mss = 0;
4050         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4051                 int tcp_opt_len, ip_tcp_len;
4052
4053                 if (skb_header_cloned(skb) &&
4054                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4055                         dev_kfree_skb(skb);
4056                         goto out_unlock;
4057                 }
4058
4059                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4060                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4061                 else {
4062                         struct iphdr *iph = ip_hdr(skb);
4063
4064                         tcp_opt_len = tcp_optlen(skb);
4065                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4066
4067                         iph->check = 0;
4068                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
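                             /* For hardware TSO the IP+TCP header length
                              * appears to be passed to the chip in the upper
                              * bits of the MSS field, hence the << 9 below.
                              */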
4069                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4070                 }
4071
4072                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4073                                TXD_FLAG_CPU_POST_DMA);
4074
4075                 tcp_hdr(skb)->check = 0;
4076         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4079                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4080 #if TG3_VLAN_TAG_USED
4081         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4082                 base_flags |= (TXD_FLAG_VLAN |
4083                                (vlan_tx_tag_get(skb) << 16));
4084 #endif
4085
4086         /* Queue skb data, a.k.a. the main skb fragment. */
4087         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4088
4089         tp->tx_buffers[entry].skb = skb;
4090         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4091
4092         tg3_set_txd(tp, entry, mapping, len, base_flags,
4093                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4094
4095         entry = NEXT_TX(entry);
4096
4097         /* Now loop through additional data fragments, and queue them. */
4098         if (skb_shinfo(skb)->nr_frags > 0) {
4099                 unsigned int i, last;
4100
4101                 last = skb_shinfo(skb)->nr_frags - 1;
4102                 for (i = 0; i <= last; i++) {
4103                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4104
4105                         len = frag->size;
4106                         mapping = pci_map_page(tp->pdev,
4107                                                frag->page,
4108                                                frag->page_offset,
4109                                                len, PCI_DMA_TODEVICE);
4110
4111                         tp->tx_buffers[entry].skb = NULL;
4112                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4113
4114                         tg3_set_txd(tp, entry, mapping, len,
4115                                     base_flags, (i == last) | (mss << 1));
4116
4117                         entry = NEXT_TX(entry);
4118                 }
4119         }
4120
4121         /* Packets are ready, update Tx producer index locally and on the card. */
4122         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4123
4124         tp->tx_prod = entry;
4125         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4126                 netif_stop_queue(dev);
4127                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4128                         netif_wake_queue(tp->dev);
4129         }
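        /* Editorial note: the queue is stopped once fewer than
         * MAX_SKB_FRAGS + 1 descriptors remain, so the next worst-case
         * packet can never overrun the ring.  The immediate re-check
         * against TG3_TX_WAKEUP_THRESH(tp) guards against the race where
         * the completion path freed descriptors between the availability
         * check and netif_stop_queue().
         */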
4130
4131 out_unlock:
4132         mmiowb();
4133
4134         dev->trans_start = jiffies;
4135
4136         return NETDEV_TX_OK;
4137 }
4138
4139 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4140
4141 /* Use GSO to work around a rare TSO bug that may be triggered when the
4142  * TSO header is greater than 80 bytes.
4143  */
4144 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4145 {
4146         struct sk_buff *segs, *nskb;
4147
4148         /* Estimate the number of fragments in the worst case */
4149         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4150                 netif_stop_queue(tp->dev);
4151                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4152                         return NETDEV_TX_BUSY;
4153
4154                 netif_wake_queue(tp->dev);
4155         }
4156
4157         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4158         if (unlikely(IS_ERR(segs)))
4159                 goto tg3_tso_bug_end;
4160
4161         do {
4162                 nskb = segs;
4163                 segs = segs->next;
4164                 nskb->next = NULL;
4165                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4166         } while (segs);
4167
4168 tg3_tso_bug_end:
4169         dev_kfree_skb(skb);
4170
4171         return NETDEV_TX_OK;
4172 }
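/* Editorial note: tg3_tso_bug() falls back to software GSO: the skb is
 * segmented with TSO masked out of the device features, and each resulting
 * segment is pushed through tg3_start_xmit_dma_bug().  The gso_segs * 3
 * availability check appears to be a worst-case estimate of descriptors
 * needed per segment; if segmentation fails the original skb is simply
 * dropped.
 */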
4173
4174 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4175  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4176  */
4177 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4178 {
4179         struct tg3 *tp = netdev_priv(dev);
4180         dma_addr_t mapping;
4181         u32 len, entry, base_flags, mss;
4182         int would_hit_hwbug;
4183
4184         len = skb_headlen(skb);
4185
4186         /* We are running in a BH-disabled context with netif_tx_lock
4187          * held, and TX reclaim runs via tp->napi.poll inside a software
4188          * interrupt.  Furthermore, IRQ processing runs lockless, so we
4189          * have no IRQ-context deadlocks to worry about either.  Rejoice!
4190          */
4191         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4192                 if (!netif_queue_stopped(dev)) {
4193                         netif_stop_queue(dev);
4194
4195                         /* This is a hard error, log it. */
4196                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4197                                "queue awake!\n", dev->name);
4198                 }
4199                 return NETDEV_TX_BUSY;
4200         }
4201
4202         entry = tp->tx_prod;
4203         base_flags = 0;
4204         if (skb->ip_summed == CHECKSUM_PARTIAL)
4205                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4206         mss = 0;
4207         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4208                 struct iphdr *iph;
4209                 int tcp_opt_len, ip_tcp_len, hdr_len;
4210
4211                 if (skb_header_cloned(skb) &&
4212                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4213                         dev_kfree_skb(skb);
4214                         goto out_unlock;
4215                 }
4216
4217                 tcp_opt_len = tcp_optlen(skb);
4218                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4219
4220                 hdr_len = ip_tcp_len + tcp_opt_len;
4221                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4222                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4223                         return tg3_tso_bug(tp, skb);
4224
4225                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4226                                TXD_FLAG_CPU_POST_DMA);
4227
4228                 iph = ip_hdr(skb);
4229                 iph->check = 0;
4230                 iph->tot_len = htons(mss + hdr_len);
4231                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4232                         tcp_hdr(skb)->check = 0;
4233                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4234                 } else
4235                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4236                                                                  iph->daddr, 0,
4237                                                                  IPPROTO_TCP,
4238                                                                  0);
4239
4240                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4241                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4242                         if (tcp_opt_len || iph->ihl > 5) {
4243                                 int tsflags;
4244
4245                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4246                                 mss |= (tsflags << 11);
4247                         }
4248                 } else {
4249                         if (tcp_opt_len || iph->ihl > 5) {
4250                                 int tsflags;
4251
4252                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4253                                 base_flags |= tsflags << 12;
4254                         }
4255                 }
4256         }
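        /* Editorial note: the IP/TCP option length (in 32-bit words) is
         * encoded differently depending on the chip: parts with hardware
         * TSO (and the 5705) take it in the upper bits of the mss value
         * (<< 11), while firmware-TSO parts apparently expect it in
         * base_flags (<< 12).
         */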
4257 #if TG3_VLAN_TAG_USED
4258         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4259                 base_flags |= (TXD_FLAG_VLAN |
4260                                (vlan_tx_tag_get(skb) << 16));
4261 #endif
4262
4263         /* Queue skb data, a.k.a. the main skb fragment. */
4264         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4265
4266         tp->tx_buffers[entry].skb = skb;
4267         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4268
4269         would_hit_hwbug = 0;
4270
4271         if (tg3_4g_overflow_test(mapping, len))
4272                 would_hit_hwbug = 1;
4273
4274         tg3_set_txd(tp, entry, mapping, len, base_flags,
4275                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4276
4277         entry = NEXT_TX(entry);
4278
4279         /* Now loop through additional data fragments, and queue them. */
4280         if (skb_shinfo(skb)->nr_frags > 0) {
4281                 unsigned int i, last;
4282
4283                 last = skb_shinfo(skb)->nr_frags - 1;
4284                 for (i = 0; i <= last; i++) {
4285                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4286
4287                         len = frag->size;
4288                         mapping = pci_map_page(tp->pdev,
4289                                                frag->page,
4290                                                frag->page_offset,
4291                                                len, PCI_DMA_TODEVICE);
4292
4293                         tp->tx_buffers[entry].skb = NULL;
4294                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4295
4296                         if (tg3_4g_overflow_test(mapping, len))
4297                                 would_hit_hwbug = 1;
4298
4299                         if (tg3_40bit_overflow_test(tp, mapping, len))
4300                                 would_hit_hwbug = 1;
4301
4302                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4303                                 tg3_set_txd(tp, entry, mapping, len,
4304                                             base_flags, (i == last)|(mss << 1));
4305                         else
4306                                 tg3_set_txd(tp, entry, mapping, len,
4307                                             base_flags, (i == last));
4308
4309                         entry = NEXT_TX(entry);
4310                 }
4311         }
4312
4313         if (would_hit_hwbug) {
4314                 u32 last_plus_one = entry;
4315                 u32 start;
4316
4317                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4318                 start &= (TG3_TX_RING_SIZE - 1);
4319
4320                 /* If the workaround fails due to memory/mapping
4321                  * failure, silently drop this packet.
4322                  */
4323                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4324                                                 &start, base_flags, mss))
4325                         goto out_unlock;
4326
4327                 entry = start;
4328         }
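        /* Editorial note: would_hit_hwbug is set whenever a mapping crosses
         * a 4GB boundary, or exceeds the 40-bit DMA limit on affected
         * chips.  In that case the descriptors already written between
         * 'start' and 'last_plus_one' are handed to
         * tigon3_dma_hwbug_workaround(), which re-queues the data
         * (presumably via bounce buffers) and updates 'start' to the
         * producer index that should be published; on failure the packet
         * is silently dropped, as noted above.
         */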
4329
4330         /* Packets are ready, update Tx producer index locally and on the card. */
4331         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4332
4333         tp->tx_prod = entry;
4334         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4335                 netif_stop_queue(dev);
4336                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4337                         netif_wake_queue(tp->dev);
4338         }
4339
4340 out_unlock:
4341         mmiowb();
4342
4343         dev->trans_start = jiffies;
4344
4345         return NETDEV_TX_OK;
4346 }
4347
4348 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4349                                int new_mtu)
4350 {
4351         dev->mtu = new_mtu;
4352
4353         if (new_mtu > ETH_DATA_LEN) {
4354                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4355                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4356                         ethtool_op_set_tso(dev, 0);
4357                 } else
4359                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4360         } else {
4361                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4362                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4363                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4364         }
4365 }
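/* Editorial note: on 5780-class chips a jumbo MTU forces TSO off (these
 * parts apparently cannot do TSO and jumbo frames at the same time),
 * while other jumbo-capable chips simply enable the separate jumbo RX
 * ring.  Dropping back to a standard MTU restores the previous state.
 */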
4366
4367 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4368 {
4369         struct tg3 *tp = netdev_priv(dev);
4370         int err;
4371
4372         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4373                 return -EINVAL;
4374
4375         if (!netif_running(dev)) {
4376                 /* We'll just catch it later when the
4377                  * device is up'd.
4378                  */
4379                 tg3_set_mtu(dev, tp, new_mtu);
4380                 return 0;
4381         }
4382
4383         tg3_netif_stop(tp);
4384
4385         tg3_full_lock(tp, 1);
4386
4387         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4388
4389         tg3_set_mtu(dev, tp, new_mtu);
4390
4391         err = tg3_restart_hw(tp, 0);
4392
4393         if (!err)
4394                 tg3_netif_start(tp);
4395
4396         tg3_full_unlock(tp);
4397
4398         return err;
4399 }
4400
4401 /* Free up pending packets in all rx/tx rings.
4402  *
4403  * The chip has been shut down and the driver detached from
4404  * the networking stack, so no interrupts or new tx packets will
4405  * end up in the driver.  tp->{tx,}lock is not held and we are not
4406  * in an interrupt context and thus may sleep.
4407  */
4408 static void tg3_free_rings(struct tg3 *tp)
4409 {
4410         struct ring_info *rxp;
4411         int i;
4412
4413         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4414                 rxp = &tp->rx_std_buffers[i];
4415
4416                 if (rxp->skb == NULL)
4417                         continue;
4418                 pci_unmap_single(tp->pdev,
4419                                  pci_unmap_addr(rxp, mapping),
4420                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4421                                  PCI_DMA_FROMDEVICE);
4422                 dev_kfree_skb_any(rxp->skb);
4423                 rxp->skb = NULL;
4424         }
4425
4426         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4427                 rxp = &tp->rx_jumbo_buffers[i];
4428
4429                 if (rxp->skb == NULL)
4430                         continue;
4431                 pci_unmap_single(tp->pdev,
4432                                  pci_unmap_addr(rxp, mapping),
4433                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4434                                  PCI_DMA_FROMDEVICE);
4435                 dev_kfree_skb_any(rxp->skb);
4436                 rxp->skb = NULL;
4437         }
4438
4439         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4440                 struct tx_ring_info *txp;
4441                 struct sk_buff *skb;
4442                 int j;
4443
4444                 txp = &tp->tx_buffers[i];
4445                 skb = txp->skb;
4446
4447                 if (skb == NULL) {
4448                         i++;
4449                         continue;
4450                 }
4451
4452                 pci_unmap_single(tp->pdev,
4453                                  pci_unmap_addr(txp, mapping),
4454                                  skb_headlen(skb),
4455                                  PCI_DMA_TODEVICE);
4456                 txp->skb = NULL;
4457
4458                 i++;
4459
4460                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4461                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4462                         pci_unmap_page(tp->pdev,
4463                                        pci_unmap_addr(txp, mapping),
4464                                        skb_shinfo(skb)->frags[j].size,
4465                                        PCI_DMA_TODEVICE);
4466                         i++;
4467                 }
4468
4469                 dev_kfree_skb_any(skb);
4470         }
4471 }
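/* Editorial note on the TX walk above: only the slot holding the head of
 * a packet owns the skb pointer (fragment slots were queued with
 * skb == NULL), so each skb is unmapped once with pci_unmap_single() for
 * the linear area and once per fragment with pci_unmap_page(), and freed
 * exactly once; ring indices wrap via the power-of-two size mask.
 */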
4472
4473 /* Initialize tx/rx rings for packet processing.
4474  *
4475  * The chip has been shut down and the driver detached from
4476  * the networking stack, so no interrupts or new tx packets will
4477  * end up in the driver.  tp->{tx,}lock are held and thus
4478  * we may not sleep.
4479  */
4480 static int tg3_init_rings(struct tg3 *tp)
4481 {
4482         u32 i;
4483
4484         /* Free up all the SKBs. */
4485         tg3_free_rings(tp);
4486
4487         /* Zero out all descriptors. */
4488         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4489         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4490         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4491         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4492
4493         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4494         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4495             (tp->dev->mtu > ETH_DATA_LEN))
4496                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4497
4498         /* Initialize the invariants of the rings; we only set this
4499          * stuff once.  This works because the card never writes
4500          * into the rx buffer posting rings.
4501          */
4502         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4503                 struct tg3_rx_buffer_desc *rxd;
4504
4505                 rxd = &tp->rx_std[i];
4506                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4507                         << RXD_LEN_SHIFT;
4508                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4509                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4510                                (i << RXD_OPAQUE_INDEX_SHIFT));
4511         }
4512
4513         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4514                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4515                         struct tg3_rx_buffer_desc *rxd;
4516
4517                         rxd = &tp->rx_jumbo[i];
4518                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4519                                 << RXD_LEN_SHIFT;
4520                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4521                                 RXD_FLAG_JUMBO;
4522                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4523                                (i << RXD_OPAQUE_INDEX_SHIFT));
4524                 }
4525         }
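        /* Editorial note: these fields are write-once because the NIC only
         * ever reads the producer rings.  idx_len leaves 64 bytes of each
         * buffer unaccounted for (presumably alignment/padding slack), and
         * the opaque word records the ring type and index so completions
         * can be matched back to the right software buffer.
         */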
4526
4527         /* Now allocate fresh SKBs for each rx ring. */
4528         for (i = 0; i < tp->rx_pending; i++) {
4529                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4530                         printk(KERN_WARNING PFX
4531                                "%s: Using a smaller RX standard ring, "
4532                                "only %d out of %d buffers were allocated "
4533                                "successfully.\n",
4534                                tp->dev->name, i, tp->rx_pending);
4535                         if (i == 0)
4536                                 return -ENOMEM;
4537                         tp->rx_pending = i;
4538                         break;
4539                 }
4540         }
4541
4542         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4543                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4544                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4545                                              -1, i) < 0) {
4546                                 printk(KERN_WARNING PFX
4547                                        "%s: Using a smaller RX jumbo ring, "
4548                                        "only %d out of %d buffers were "
4549                                        "allocated successfully.\n",
4550                                        tp->dev->name, i, tp->rx_jumbo_pending);
4551                                 if (i == 0) {
4552                                         tg3_free_rings(tp);
4553                                         return -ENOMEM;
4554                                 }
4555                                 tp->rx_jumbo_pending = i;
4556                                 break;
4557                         }
4558                 }
4559         }
4560         return 0;
4561 }
4562
4563 /*
4564  * Must not be invoked while the hardware is still running or its
4565  * interrupt sources are enabled.
4566  */
4567 static void tg3_free_consistent(struct tg3 *tp)
4568 {
4569         kfree(tp->rx_std_buffers);
4570         tp->rx_std_buffers = NULL;
4571         if (tp->rx_std) {
4572                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4573                                     tp->rx_std, tp->rx_std_mapping);
4574                 tp->rx_std = NULL;
4575         }
4576         if (tp->rx_jumbo) {
4577                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4578                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4579                 tp->rx_jumbo = NULL;
4580         }
4581         if (tp->rx_rcb) {
4582                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4583                                     tp->rx_rcb, tp->rx_rcb_mapping);
4584                 tp->rx_rcb = NULL;
4585         }
4586         if (tp->tx_ring) {
4587                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4588                         tp->tx_ring, tp->tx_desc_mapping);
4589                 tp->tx_ring = NULL;
4590         }
4591         if (tp->hw_status) {
4592                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4593                                     tp->hw_status, tp->status_mapping);
4594                 tp->hw_status = NULL;
4595         }
4596         if (tp->hw_stats) {
4597                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4598                                     tp->hw_stats, tp->stats_mapping);
4599                 tp->hw_stats = NULL;
4600         }
4601 }
4602
4603 /*
4604  * Must not be invoked while the hardware is still running or its
4605  * interrupt sources are enabled.  Can sleep.
4606  */
4607 static int tg3_alloc_consistent(struct tg3 *tp)
4608 {
4609         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4610                                       (TG3_RX_RING_SIZE +
4611                                        TG3_RX_JUMBO_RING_SIZE)) +
4612                                      (sizeof(struct tx_ring_info) *
4613                                       TG3_TX_RING_SIZE),
4614                                      GFP_KERNEL);
4615         if (!tp->rx_std_buffers)
4616                 return -ENOMEM;
4617
4618         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4619         tp->tx_buffers = (struct tx_ring_info *)
4620                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4621
4622         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4623                                           &tp->rx_std_mapping);
4624         if (!tp->rx_std)
4625                 goto err_out;
4626
4627         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4628                                             &tp->rx_jumbo_mapping);
4629
4630         if (!tp->rx_jumbo)
4631                 goto err_out;
4632
4633         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4634                                           &tp->rx_rcb_mapping);
4635         if (!tp->rx_rcb)
4636                 goto err_out;
4637
4638         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4639                                            &tp->tx_desc_mapping);
4640         if (!tp->tx_ring)
4641                 goto err_out;
4642
4643         tp->hw_status = pci_alloc_consistent(tp->pdev,
4644                                              TG3_HW_STATUS_SIZE,
4645                                              &tp->status_mapping);
4646         if (!tp->hw_status)
4647                 goto err_out;
4648
4649         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4650                                             sizeof(struct tg3_hw_stats),
4651                                             &tp->stats_mapping);
4652         if (!tp->hw_stats)
4653                 goto err_out;
4654
4655         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4656         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4657
4658         return 0;
4659
4660 err_out:
4661         tg3_free_consistent(tp);
4662         return -ENOMEM;
4663 }
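/* Editorial note: a single kzalloc() backs all three software bookkeeping
 * arrays (standard RX, jumbo RX and TX ring_info), while each descriptor
 * ring, the status block and the statistics block get their own
 * PCI-coherent allocation.  Any failure unwinds through
 * tg3_free_consistent(), which copes with a partially completed setup.
 */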
4664
4665 #define MAX_WAIT_CNT 1000
4666
4667 /* To stop a block, clear the enable bit and poll till it
4668  * clears.  tp->lock is held.
4669  */
4670 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4671 {
4672         unsigned int i;
4673         u32 val;
4674
4675         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4676                 switch (ofs) {
4677                 case RCVLSC_MODE:
4678                 case DMAC_MODE:
4679                 case MBFREE_MODE:
4680                 case BUFMGR_MODE:
4681                 case MEMARB_MODE:
4682                         /* We can't enable/disable these bits on the
4683                          * 5705/5750; just report success.
4684                          */
4685                         return 0;
4686
4687                 default:
4688                         break;
4689                 }
4690         }
4691
4692         val = tr32(ofs);
4693         val &= ~enable_bit;
4694         tw32_f(ofs, val);
4695
4696         for (i = 0; i < MAX_WAIT_CNT; i++) {
4697                 udelay(100);
4698                 val = tr32(ofs);
4699                 if ((val & enable_bit) == 0)
4700                         break;
4701         }
4702
4703         if (i == MAX_WAIT_CNT && !silent) {
4704                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4705                        "ofs=%lx enable_bit=%x\n",
4706                        ofs, enable_bit);
4707                 return -ENODEV;
4708         }
4709
4710         return 0;
4711 }
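/* Editorial note: tg3_stop_block() polls for up to
 * MAX_WAIT_CNT * 100us = 100ms for the enable bit to clear.  On 5705-plus
 * chips a few blocks cannot be individually disabled at all, so those
 * offsets are simply treated as already stopped.
 */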
4712
4713 /* tp->lock is held. */
4714 static int tg3_abort_hw(struct tg3 *tp, int silent)
4715 {
4716         int i, err;
4717
4718         tg3_disable_ints(tp);
4719
4720         tp->rx_mode &= ~RX_MODE_ENABLE;
4721         tw32_f(MAC_RX_MODE, tp->rx_mode);
4722         udelay(10);
4723
4724         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4725         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4726         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4727         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4728         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4729         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4730
4731         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4732         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4733         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4734         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4735         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4736         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4737         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4738
4739         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4740         tw32_f(MAC_MODE, tp->mac_mode);
4741         udelay(40);
4742
4743         tp->tx_mode &= ~TX_MODE_ENABLE;
4744         tw32_f(MAC_TX_MODE, tp->tx_mode);
4745
4746         for (i = 0; i < MAX_WAIT_CNT; i++) {
4747                 udelay(100);
4748                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4749                         break;
4750         }
4751         if (i >= MAX_WAIT_CNT) {
4752                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4753                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4754                        tp->dev->name, tr32(MAC_TX_MODE));
4755                 err |= -ENODEV;
4756         }
4757
4758         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4759         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4760         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4761
4762         tw32(FTQ_RESET, 0xffffffff);
4763         tw32(FTQ_RESET, 0x00000000);
4764
4765         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4766         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4767
4768         if (tp->hw_status)
4769                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4770         if (tp->hw_stats)
4771                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4772
4773         return err;
4774 }
4775
4776 /* tp->lock is held. */
4777 static int tg3_nvram_lock(struct tg3 *tp)
4778 {
4779         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4780                 int i;
4781
4782                 if (tp->nvram_lock_cnt == 0) {
4783                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4784                         for (i = 0; i < 8000; i++) {
4785                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4786                                         break;
4787                                 udelay(20);
4788                         }
4789                         if (i == 8000) {
4790                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4791                                 return -ENODEV;
4792                         }
4793                 }
4794                 tp->nvram_lock_cnt++;
4795         }
4796         return 0;
4797 }
4798
4799 /* tp->lock is held. */
4800 static void tg3_nvram_unlock(struct tg3 *tp)
4801 {
4802         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4803                 if (tp->nvram_lock_cnt > 0)
4804                         tp->nvram_lock_cnt--;
4805                 if (tp->nvram_lock_cnt == 0)
4806                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4807         }
4808 }
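/* Editorial note: NVRAM access is arbitrated in software through the
 * SWARB register: request SET1, then poll for GNT1 for up to
 * 8000 * 20us = 160ms.  nvram_lock_cnt makes the lock recursive, so only
 * the outermost lock/unlock pair actually touches the hardware.
 */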
4809
4810 /* tp->lock is held. */
4811 static void tg3_enable_nvram_access(struct tg3 *tp)
4812 {
4813         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4814             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4815                 u32 nvaccess = tr32(NVRAM_ACCESS);
4816
4817                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4818         }
4819 }
4820
4821 /* tp->lock is held. */
4822 static void tg3_disable_nvram_access(struct tg3 *tp)
4823 {
4824         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4825             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4826                 u32 nvaccess = tr32(NVRAM_ACCESS);
4827
4828                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4829         }
4830 }
4831
4832 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4833 {
4834         int i;
4835         u32 apedata;
4836
4837         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4838         if (apedata != APE_SEG_SIG_MAGIC)
4839                 return;
4840
4841         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4842         if (apedata != APE_FW_STATUS_READY)
4843                 return;
4844
4845         /* Wait for up to 1 millisecond for APE to service previous event. */
4846         for (i = 0; i < 10; i++) {
4847                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4848                         return;
4849
4850                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4851
4852                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4853                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4854                                         event | APE_EVENT_STATUS_EVENT_PENDING);
4855
4856                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4857
4858                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4859                         break;
4860
4861                 udelay(100);
4862         }
4863
4864         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4865                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4866 }
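/* Editorial note: tg3_ape_send_event() first checks the APE shared-memory
 * signature and firmware-ready status, then, under the APE memory lock,
 * posts the event with the EVENT_PENDING bit set once any previous event
 * has been consumed, and finally writes APE_EVENT_1 (presumably the
 * doorbell) to kick the APE.  The loop gives the APE roughly 1ms
 * (10 * 100us) to drain a pending event.
 */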
4867
4868 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4869 {
4870         u32 event;
4871         u32 apedata;
4872
4873         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4874                 return;
4875
4876         switch (kind) {
4877                 case RESET_KIND_INIT:
4878                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4879                                         APE_HOST_SEG_SIG_MAGIC);
4880                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4881                                         APE_HOST_SEG_LEN_MAGIC);
4882                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4883                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4884                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4885                                         APE_HOST_DRIVER_ID_MAGIC);
4886                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4887                                         APE_HOST_BEHAV_NO_PHYLOCK);
4888
4889                         event = APE_EVENT_STATUS_STATE_START;
4890                         break;
4891                 case RESET_KIND_SHUTDOWN:
4892                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4893                         break;
4894                 case RESET_KIND_SUSPEND:
4895                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4896                         break;
4897                 default:
4898                         return;
4899         }
4900
4901         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4902
4903         tg3_ape_send_event(tp, event);
4904 }
4905
4906 /* tp->lock is held. */
4907 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4908 {
4909         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4910                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4911
4912         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4913                 switch (kind) {
4914                 case RESET_KIND_INIT:
4915                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4916                                       DRV_STATE_START);
4917                         break;
4918
4919                 case RESET_KIND_SHUTDOWN:
4920                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4921                                       DRV_STATE_UNLOAD);
4922                         break;
4923
4924                 case RESET_KIND_SUSPEND:
4925                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4926                                       DRV_STATE_SUSPEND);
4927                         break;
4928
4929                 default:
4930                         break;
4931                 }
4932         }
4933
4934         if (kind == RESET_KIND_INIT ||
4935             kind == RESET_KIND_SUSPEND)
4936                 tg3_ape_driver_state_change(tp, kind);
4937 }
4938
4939 /* tp->lock is held. */
4940 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4941 {
4942         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4943                 switch (kind) {
4944                 case RESET_KIND_INIT:
4945                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4946                                       DRV_STATE_START_DONE);
4947                         break;
4948
4949                 case RESET_KIND_SHUTDOWN:
4950                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4951                                       DRV_STATE_UNLOAD_DONE);
4952                         break;
4953
4954                 default:
4955                         break;
4956                 }
4957         }
4958
4959         if (kind == RESET_KIND_SHUTDOWN)
4960                 tg3_ape_driver_state_change(tp, kind);
4961 }
4962
4963 /* tp->lock is held. */
4964 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4965 {
4966         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4967                 switch (kind) {
4968                 case RESET_KIND_INIT:
4969                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4970                                       DRV_STATE_START);
4971                         break;
4972
4973                 case RESET_KIND_SHUTDOWN:
4974                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4975                                       DRV_STATE_UNLOAD);
4976                         break;
4977
4978                 case RESET_KIND_SUSPEND:
4979                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4980                                       DRV_STATE_SUSPEND);
4981                         break;
4982
4983                 default:
4984                         break;
4985                 }
4986         }
4987 }
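/* Editorial note: the three signature helpers above publish the driver's
 * lifecycle state (start/unload/suspend) in NIC SRAM mailboxes so that
 * ASF management firmware, and the APE on newer parts, can track the host
 * driver across resets.  The pre-reset variant also arms the firmware
 * mailbox with the boot magic, while the post-reset variant reports the
 * corresponding *_DONE states.
 */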
4988
4989 static int tg3_poll_fw(struct tg3 *tp)
4990 {
4991         int i;
4992         u32 val;
4993
4994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4995                 /* Wait up to 20ms for init done. */
4996                 for (i = 0; i < 200; i++) {
4997                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4998                                 return 0;
4999                         udelay(100);
5000                 }
5001                 return -ENODEV;
5002         }
5003
5004         /* Wait for firmware initialization to complete. */
5005         for (i = 0; i < 100000; i++) {
5006                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5007                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5008                         break;
5009                 udelay(10);
5010         }
5011
5012         /* Chip might not be fitted with firmware.  Some Sun onboard
5013          * parts are configured like that.  So don't signal the timeout
5014          * of the above loop as an error, but do report the lack of
5015          * running firmware once.
5016          */
5017         if (i >= 100000 &&
5018             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5019                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5020
5021                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5022                        tp->dev->name);
5023         }
5024
5025         return 0;
5026 }
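/* Editorial note: the 5906 signals boot completion through VCPU_STATUS and
 * is given up to 200 * 100us = 20ms; other chips are polled for up to
 * 100000 * 10us = 1s for the firmware mailbox to echo the bitwise
 * complement of the boot magic.  A missing firmware image (e.g. some Sun
 * onboard parts) is reported once but not treated as a failure.
 */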
5027
5028 /* Save PCI command register before chip reset */
5029 static void tg3_save_pci_state(struct tg3 *tp)
5030 {
5031         u32 val;
5032
5033         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5034         tp->pci_cmd = val;
5035 }
5036
5037 /* Restore PCI state after chip reset */
5038 static void tg3_restore_pci_state(struct tg3 *tp)
5039 {
5040         u32 val;
5041
5042         /* Re-enable indirect register accesses. */
5043         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5044                                tp->misc_host_ctrl);
5045
5046         /* Set MAX PCI retry to zero. */
5047         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5048         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5049             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5050                 val |= PCISTATE_RETRY_SAME_DMA;
5051         /* Allow reads and writes to the APE register and memory space. */
5052         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5053                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5054                        PCISTATE_ALLOW_APE_SHMEM_WR;
5055         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5056
5057         pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
5058
5059         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5060                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5061                                       tp->pci_cacheline_sz);
5062                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5063                                       tp->pci_lat_timer);
5064         }
5065         /* Make sure PCI-X relaxed ordering bit is clear. */
5066         if (tp->pcix_cap) {
5067                 u16 pcix_cmd;
5068
5069                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5070                                      &pcix_cmd);
5071                 pcix_cmd &= ~PCI_X_CMD_ERO;
5072                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5073                                       pcix_cmd);
5074         }
5075
5076         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5077
5078                 /* A chip reset on the 5780 clears the MSI enable bit,
5079                  * so we need to restore it.
5080                  */
5081                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5082                         u16 ctrl;
5083
5084                         pci_read_config_word(tp->pdev,
5085                                              tp->msi_cap + PCI_MSI_FLAGS,
5086                                              &ctrl);
5087                         pci_write_config_word(tp->pdev,
5088                                               tp->msi_cap + PCI_MSI_FLAGS,
5089                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5090                         val = tr32(MSGINT_MODE);
5091                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5092                 }
5093         }
5094 }
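/* Editorial note: a chip reset clobbers PCI config state that the PCI core
 * will not restore for us, so it is put back by hand here: the command
 * register saved in tg3_save_pci_state(), the cache line size and latency
 * timer on non-PCI-Express parts, the PCI-X relaxed ordering bit, and the
 * MSI enable bit (plus MSGINT_MODE) on 5780-class chips.
 */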
5095
5096 static void tg3_stop_fw(struct tg3 *);
5097
5098 /* tp->lock is held. */
5099 static int tg3_chip_reset(struct tg3 *tp)
5100 {
5101         u32 val;
5102         void (*write_op)(struct tg3 *, u32, u32);
5103         int err;
5104
5105         tg3_nvram_lock(tp);
5106
5107         /* No matching tg3_nvram_unlock() after this because
5108          * chip reset below will undo the nvram lock.
5109          */
5110         tp->nvram_lock_cnt = 0;
5111
5112         /* GRC_MISC_CFG core clock reset will clear the memory
5113          * enable bit in PCI register 4 and the MSI enable bit
5114          * on some chips, so we save relevant registers here.
5115          */
5116         tg3_save_pci_state(tp);
5117
5118         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5119             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5120             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5121             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5123                 tw32(GRC_FASTBOOT_PC, 0);
5124
5125         /*
5126          * We must avoid the readl() that normally takes place.
5127          * It can lock up machines, cause machine checks, and do other
5128          * fun things.  So, temporarily disable the 5701
5129          * hardware workaround while we do the reset.
5130          */
5131         write_op = tp->write32;
5132         if (write_op == tg3_write_flush_reg32)
5133                 tp->write32 = tg3_write32;
5134
5135         /* Prevent the irq handler from reading or writing PCI registers
5136          * during chip reset when the memory enable bit in the PCI command
5137          * register may be cleared.  The chip does not generate interrupt
5138          * register may be cleared.  The chip does not generate interrupts
5139          * sharing or irqpoll.
5140          */
5141         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5142         if (tp->hw_status) {
5143                 tp->hw_status->status = 0;
5144                 tp->hw_status->status_tag = 0;
5145         }
5146         tp->last_tag = 0;
5147         smp_mb();
5148         synchronize_irq(tp->pdev->irq);
5149
5150         /* do the reset */
5151         val = GRC_MISC_CFG_CORECLK_RESET;
5152
5153         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5154                 if (tr32(0x7e2c) == 0x60) {
5155                         tw32(0x7e2c, 0x20);
5156                 }
5157                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5158                         tw32(GRC_MISC_CFG, (1 << 29));
5159                         val |= (1 << 29);
5160                 }
5161         }
5162
5163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5164                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5165                 tw32(GRC_VCPU_EXT_CTRL,
5166                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5167         }
5168
5169         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5170                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5171         tw32(GRC_MISC_CFG, val);
5172
5173         /* restore 5701 hardware bug workaround write method */
5174         tp->write32 = write_op;
5175
5176         /* Unfortunately, we have to delay before the PCI read back.
5177          * Some 575X chips will not even respond to a PCI cfg access
5178          * when the reset command is given to the chip.
5179          *
5180          * How do these hardware designers expect things to work
5181          * properly if the PCI write is posted for a long period
5182          * of time?  It is always necessary to have some method by
5183          * which a register read back can occur to push the write
5184          * out which does the reset.
5185          *
5186          * For most tg3 variants the trick below was working.
5187          * Ho hum...
5188          */
5189         udelay(120);
5190
5191         /* Flush PCI posted writes.  The normal MMIO registers
5192          * are inaccessible at this time so this is the only
5193          * way to do this reliably (actually, this is no longer
5194          * the case, see above).  I tried to use indirect
5195          * register read/write but this upset some 5701 variants.
5196          */
5197         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5198
5199         udelay(120);
5200
5201         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5202                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5203                         int i;
5204                         u32 cfg_val;
5205
5206                         /* Wait for link training to complete.  */
5207                         for (i = 0; i < 5000; i++)
5208                                 udelay(100);
5209
5210                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5211                         pci_write_config_dword(tp->pdev, 0xc4,
5212                                                cfg_val | (1 << 15));
5213                 }
5214                 /* Set PCIE max payload size and clear error status.  */
5215                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5216         }
5217
5218         tg3_restore_pci_state(tp);
5219
5220         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5221
5222         val = 0;
5223         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5224                 val = tr32(MEMARB_MODE);
5225         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5226
5227         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5228                 tg3_stop_fw(tp);
5229                 tw32(0x5000, 0x400);
5230         }
5231
5232         tw32(GRC_MODE, tp->grc_mode);
5233
5234         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5235                 val = tr32(0xc4);
5236
5237                 tw32(0xc4, val | (1 << 15));
5238         }
5239
5240         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5241             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5242                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5243                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5244                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5245                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5246         }
5247
5248         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5249                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5250                 tw32_f(MAC_MODE, tp->mac_mode);
5251         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5252                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5253                 tw32_f(MAC_MODE, tp->mac_mode);
5254         } else
5255                 tw32_f(MAC_MODE, 0);
5256         udelay(40);
5257
5258         err = tg3_poll_fw(tp);
5259         if (err)
5260                 return err;
5261
5262         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5263             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5264                 val = tr32(0x7c00);
5265
5266                 tw32(0x7c00, val | (1 << 25));
5267         }
5268
5269         /* Reprobe ASF enable state.  */
5270         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5271         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5272         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5273         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5274                 u32 nic_cfg;
5275
5276                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5277                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5278                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5279                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5280                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5281                 }
5282         }
5283
5284         return 0;
5285 }
5286
5287 /* tp->lock is held. */
5288 static void tg3_stop_fw(struct tg3 *tp)
5289 {
5290         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5291            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5292                 u32 val;
5293                 int i;
5294
5295                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5296                 val = tr32(GRC_RX_CPU_EVENT);
5297                 val |= (1 << 14);
5298                 tw32(GRC_RX_CPU_EVENT, val);
5299
5300                 /* Wait for RX cpu to ACK the event.  */
5301                 for (i = 0; i < 100; i++) {
5302                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5303                                 break;
5304                         udelay(1);
5305                 }
5306         }
5307 }
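/* Editorial note: tg3_stop_fw() asks the ASF firmware to pause by writing
 * FWCMD_NICDRV_PAUSE_FW into the firmware command mailbox and setting
 * bit 14 of GRC_RX_CPU_EVENT (presumably the firmware event doorbell),
 * then polls for up to 100us for the RX CPU to acknowledge by clearing it.
 * Nothing is done unless ASF is enabled and the firmware is not under APE
 * control.
 */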
5308
5309 /* tp->lock is held. */
5310 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5311 {
5312         int err;
5313
5314         tg3_stop_fw(tp);
5315
5316         tg3_write_sig_pre_reset(tp, kind);
5317
5318         tg3_abort_hw(tp, silent);
5319         err = tg3_chip_reset(tp);
5320
5321         tg3_write_sig_legacy(tp, kind);
5322         tg3_write_sig_post_reset(tp, kind);
5323
5324         if (err)
5325                 return err;
5326
5327         return 0;
5328 }
5329
5330 #define TG3_FW_RELEASE_MAJOR    0x0
5331 #define TG3_FW_RELEASE_MINOR    0x0
5332 #define TG3_FW_RELEASE_FIX      0x0
5333 #define TG3_FW_START_ADDR       0x08000000
5334 #define TG3_FW_TEXT_ADDR        0x08000000
5335 #define TG3_FW_TEXT_LEN         0x9c0
5336 #define TG3_FW_RODATA_ADDR      0x080009c0
5337 #define TG3_FW_RODATA_LEN       0x60
5338 #define TG3_FW_DATA_ADDR        0x08000a40
5339 #define TG3_FW_DATA_LEN         0x20
5340 #define TG3_FW_SBSS_ADDR        0x08000a60
5341 #define TG3_FW_SBSS_LEN         0xc
5342 #define TG3_FW_BSS_ADDR         0x08000a70
5343 #define TG3_FW_BSS_LEN          0x10
5344
5345 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5346         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5347         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5348         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5349         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5350         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5351         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5352         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5353         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5354         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5355         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5356         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5357         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5358         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5359         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5360         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5361         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5362         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5363         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5364         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5365         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5366         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5367         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5368         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5369         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5370         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5371         0, 0, 0, 0, 0, 0,
5372         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5373         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5374         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5375         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5376         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5377         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5378         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5379         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5380         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5381         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5382         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5383         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5384         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5385         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5386         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5387         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5388         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5389         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5390         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5391         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5392         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5393         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5394         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5395         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5396         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5397         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5398         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5399         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5400         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5401         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5402         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5403         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5404         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5405         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5406         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5407         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5408         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5409         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5410         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5411         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5412         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5413         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5414         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5415         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5416         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5417         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5418         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5419         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5420         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5421         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5422         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5423         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5424         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5425         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5426         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5427         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5428         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5429         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5430         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5431         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5432         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5433         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5434         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5435         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5436         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5437 };
5438
5439 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5440         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5441         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5442         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5443         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5444         0x00000000
5445 };
5446
5447 #if 0 /* All zeros, don't eat up space with it. */
5448 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5449         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5450         0x00000000, 0x00000000, 0x00000000, 0x00000000
5451 };
5452 #endif
5453
5454 #define RX_CPU_SCRATCH_BASE     0x30000
5455 #define RX_CPU_SCRATCH_SIZE     0x04000
5456 #define TX_CPU_SCRATCH_BASE     0x34000
5457 #define TX_CPU_SCRATCH_SIZE     0x04000
5458
5459 /* tp->lock is held. */
5460 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5461 {
5462         int i;
5463
5464         BUG_ON(offset == TX_CPU_BASE &&
5465             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5466
5467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5468                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5469
5470                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5471                 return 0;
5472         }
5473         if (offset == RX_CPU_BASE) {
5474                 for (i = 0; i < 10000; i++) {
5475                         tw32(offset + CPU_STATE, 0xffffffff);
5476                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5477                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5478                                 break;
5479                 }
5480
5481                 tw32(offset + CPU_STATE, 0xffffffff);
5482                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5483                 udelay(10);
5484         } else {
5485                 for (i = 0; i < 10000; i++) {
5486                         tw32(offset + CPU_STATE, 0xffffffff);
5487                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5488                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5489                                 break;
5490                 }
5491         }
5492
5493         if (i >= 10000) {
5494                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s "
5495                        "(%s CPU)\n",
5496                        tp->dev->name,
5497                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5498                 return -ENODEV;
5499         }
5500
5501         /* Clear firmware's nvram arbitration. */
5502         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5503                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5504         return 0;
5505 }
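/*
 * Illustrative sketch, not part of the driver: the RX and TX branches in
 * tg3_halt_cpu() above run the same "assert HALT and poll" loop.  A
 * hypothetical shared helper (name and factoring are assumptions, using the
 * same tr32/tw32 accessors as the code above) would look roughly like this:
 *
 *	static int tg3_poll_cpu_halted(struct tg3 *tp, u32 offset)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 10000; i++) {
 *			tw32(offset + CPU_STATE, 0xffffffff);
 *			tw32(offset + CPU_MODE, CPU_MODE_HALT);
 *			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
 *				return 0;
 *		}
 *		return -ENODEV;
 *	}
 */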
5506
5507 struct fw_info {
5508         unsigned int text_base;
5509         unsigned int text_len;
5510         const u32 *text_data;
5511         unsigned int rodata_base;
5512         unsigned int rodata_len;
5513         const u32 *rodata_data;
5514         unsigned int data_base;
5515         unsigned int data_len;
5516         const u32 *data_data;
5517 };
5518
5519 /* tp->lock is held. */
5520 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5521                                  int cpu_scratch_size, struct fw_info *info)
5522 {
5523         int err, lock_err, i;
5524         void (*write_op)(struct tg3 *, u32, u32);
5525
5526         if (cpu_base == TX_CPU_BASE &&
5527             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5528                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5529                        "TX cpu firmware on %s which is 5705 or later.\n",
5530                        tp->dev->name);
5531                 return -EINVAL;
5532         }
5533
5534         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5535                 write_op = tg3_write_mem;
5536         else
5537                 write_op = tg3_write_indirect_reg32;
5538
5539         /* It is possible that bootcode is still loading at this point.
5540          * Get the nvram lock first before halting the cpu.
5541          */
5542         lock_err = tg3_nvram_lock(tp);
5543         err = tg3_halt_cpu(tp, cpu_base);
5544         if (!lock_err)
5545                 tg3_nvram_unlock(tp);
5546         if (err)
5547                 goto out;
5548
5549         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5550                 write_op(tp, cpu_scratch_base + i, 0);
5551         tw32(cpu_base + CPU_STATE, 0xffffffff);
5552         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
5553         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5554                 write_op(tp, (cpu_scratch_base +
5555                               (info->text_base & 0xffff) +
5556                               (i * sizeof(u32))),
5557                          (info->text_data ?
5558                           info->text_data[i] : 0));
5559         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5560                 write_op(tp, (cpu_scratch_base +
5561                               (info->rodata_base & 0xffff) +
5562                               (i * sizeof(u32))),
5563                          (info->rodata_data ?
5564                           info->rodata_data[i] : 0));
5565         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5566                 write_op(tp, (cpu_scratch_base +
5567                               (info->data_base & 0xffff) +
5568                               (i * sizeof(u32))),
5569                          (info->data_data ?
5570                           info->data_data[i] : 0));
5571
5572         err = 0;
5573
5574 out:
5575         return err;
5576 }
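/*
 * Worked example of the scratch address computation above, a sketch using
 * only constants defined in this file (the non-5705 TSO load below
 * exercises it): with cpu_scratch_base = TX_CPU_SCRATCH_BASE (0x34000) and
 * info->text_base = TG3_TSO_FW_TEXT_ADDR (0x08000000), text word i lands at
 *
 *	0x34000 + (0x08000000 & 0xffff) + i * sizeof(u32)
 *	  = 0x34000 + 0 + 4 * i
 *
 * i.e. the low 16 bits of each section's link address select its offset
 * inside the CPU's scratch window.
 */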
5577
5578 /* tp->lock is held. */
5579 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5580 {
5581         struct fw_info info;
5582         int err, i;
5583
5584         info.text_base = TG3_FW_TEXT_ADDR;
5585         info.text_len = TG3_FW_TEXT_LEN;
5586         info.text_data = &tg3FwText[0];
5587         info.rodata_base = TG3_FW_RODATA_ADDR;
5588         info.rodata_len = TG3_FW_RODATA_LEN;
5589         info.rodata_data = &tg3FwRodata[0];
5590         info.data_base = TG3_FW_DATA_ADDR;
5591         info.data_len = TG3_FW_DATA_LEN;
5592         info.data_data = NULL;
5593
5594         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5595                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5596                                     &info);
5597         if (err)
5598                 return err;
5599
5600         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5601                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5602                                     &info);
5603         if (err)
5604                 return err;
5605
5606         /* Now start up only the RX CPU. */
5607         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5608         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5609
5610         for (i = 0; i < 5; i++) {
5611                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5612                         break;
5613                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5614                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5615                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5616                 udelay(1000);
5617         }
5618         if (i >= 5) {
5619                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
5620                        "to set RX CPU PC: is %08x, should be %08x\n",
5621                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5622                        TG3_FW_TEXT_ADDR);
5623                 return -ENODEV;
5624         }
5625         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5626         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5627
5628         return 0;
5629 }
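/*
 * Illustrative sketch, not part of the driver: the "point PC at the image,
 * re-check it a few times, then clear HALT" sequence above is repeated in
 * tg3_load_tso_firmware() below.  A hypothetical shared helper (an
 * assumption, built from the same accessors used above) could look like:
 *
 *	static int tg3_start_cpu(struct tg3 *tp, u32 cpu_base, u32 pc)
 *	{
 *		int i;
 *
 *		tw32(cpu_base + CPU_STATE, 0xffffffff);
 *		tw32_f(cpu_base + CPU_PC, pc);
 *		for (i = 0; i < 5; i++) {
 *			if (tr32(cpu_base + CPU_PC) == pc)
 *				break;
 *			tw32(cpu_base + CPU_STATE, 0xffffffff);
 *			tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
 *			tw32_f(cpu_base + CPU_PC, pc);
 *			udelay(1000);
 *		}
 *		if (i >= 5)
 *			return -ENODEV;
 *		tw32(cpu_base + CPU_STATE, 0xffffffff);
 *		tw32_f(cpu_base + CPU_MODE, 0x00000000);
 *		return 0;
 *	}
 */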
5630
5631
5632 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5633 #define TG3_TSO_FW_RELASE_MINOR         0x6
5634 #define TG3_TSO_FW_RELEASE_FIX          0x0
5635 #define TG3_TSO_FW_START_ADDR           0x08000000
5636 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5637 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5638 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5639 #define TG3_TSO_FW_RODATA_LEN           0x60
5640 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5641 #define TG3_TSO_FW_DATA_LEN             0x30
5642 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5643 #define TG3_TSO_FW_SBSS_LEN             0x2c
5644 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5645 #define TG3_TSO_FW_BSS_LEN              0x894
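/*
 * Sanity check on the section layout above (arithmetic only, derived from
 * the #defines): text runs 0x08000000 + 0x1aa0 = 0x08001aa0, which is
 * exactly TG3_TSO_FW_RODATA_ADDR; rodata ends at 0x08001b00 and data starts
 * at the next 0x20-aligned address, 0x08001b20; data ends at 0x08001b50 =
 * TG3_TSO_FW_SBSS_ADDR; sbss ends at 0x08001b7c and bss starts at the
 * aligned 0x08001b80.  The sections are contiguous apart from alignment
 * padding.
 */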
5646
5647 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5648         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5649         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5650         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5651         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5652         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5653         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5654         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5655         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5656         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5657         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5658         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5659         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5660         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5661         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5662         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5663         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5664         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5665         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5666         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5667         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5668         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5669         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5670         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5671         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5672         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5673         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5674         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5675         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5676         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5677         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5678         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5679         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5680         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5681         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5682         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5683         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5684         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5685         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5686         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5687         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5688         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5689         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5690         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5691         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5692         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5693         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5694         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5695         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5696         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5697         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5698         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5699         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5700         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5701         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5702         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5703         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5704         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5705         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5706         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5707         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5708         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5709         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5710         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5711         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5712         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5713         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5714         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5715         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5716         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5717         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5718         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5719         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5720         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5721         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5722         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5723         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5724         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5725         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5726         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5727         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5728         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5729         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5730         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5731         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5732         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5733         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5734         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5735         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5736         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5737         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5738         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5739         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5740         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5741         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5742         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5743         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5744         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5745         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5746         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5747         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5748         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5749         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5750         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5751         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5752         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5753         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5754         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5755         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5756         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5757         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5758         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5759         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5760         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5761         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5762         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5763         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5764         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5765         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5766         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5767         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5768         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5769         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5770         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5771         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5772         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5773         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5774         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5775         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5776         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5777         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5778         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5779         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5780         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5781         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5782         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5783         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5784         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5785         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5786         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5787         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5788         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5789         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5790         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5791         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5792         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5793         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5794         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5795         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5796         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5797         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5798         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5799         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5800         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5801         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5802         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5803         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5804         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5805         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5806         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5807         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5808         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5809         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5810         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5811         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5812         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5813         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5814         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5815         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5816         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5817         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5818         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5819         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5820         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5821         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5822         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5823         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5824         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5825         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5826         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5827         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5828         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5829         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5830         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5831         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5832         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5833         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5834         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5835         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5836         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5837         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5838         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5839         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5840         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5841         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5842         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5843         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5844         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5845         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5846         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5847         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5848         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5849         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5850         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5851         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5852         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5853         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5854         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5855         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5856         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5857         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5858         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5859         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5860         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5861         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5862         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5863         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5864         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5865         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5866         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5867         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5868         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5869         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5870         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5871         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5872         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5873         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5874         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5875         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5876         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5877         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5878         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5879         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5880         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5881         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5882         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5883         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5884         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5885         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5886         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5887         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5888         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5889         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5890         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5891         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5892         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5893         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5894         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5895         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5896         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5897         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5898         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5899         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5900         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5901         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5902         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5903         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5904         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5905         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5906         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5907         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5908         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5909         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5910         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5911         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5912         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5913         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5914         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5915         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5916         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5917         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5918         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5919         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5920         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5921         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5922         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5923         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5924         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5925         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5926         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5927         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5928         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5929         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5930         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5931         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5932 };
5933
5934 static const u32 tg3TsoFwRodata[] = {
5935         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5936         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5937         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5938         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5939         0x00000000,
5940 };
5941
5942 static const u32 tg3TsoFwData[] = {
5943         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5944         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5945         0x00000000,
5946 };
5947
5948 /* 5705 needs a special version of the TSO firmware.  */
5949 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5950 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5951 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5952 #define TG3_TSO5_FW_START_ADDR          0x00010000
5953 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5954 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5955 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5956 #define TG3_TSO5_FW_RODATA_LEN          0x50
5957 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5958 #define TG3_TSO5_FW_DATA_LEN            0x20
5959 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5960 #define TG3_TSO5_FW_SBSS_LEN            0x28
5961 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5962 #define TG3_TSO5_FW_BSS_LEN             0x88
5963
5964 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5965         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5966         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5967         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5968         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5969         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5970         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5971         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5972         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5973         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5974         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5975         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5976         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5977         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5978         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5979         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5980         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5981         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5982         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5983         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5984         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5985         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5986         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5987         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5988         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5989         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5990         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5991         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5992         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5993         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5994         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5995         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5996         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5997         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5998         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5999         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6000         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6001         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6002         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6003         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6004         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6005         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6006         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6007         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6008         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6009         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6010         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6011         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6012         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6013         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6014         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6015         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6016         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6017         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6018         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6019         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6020         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6021         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6022         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6023         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6024         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6025         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6026         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6027         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6028         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6029         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6030         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6031         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6032         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6033         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6034         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6035         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6036         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6037         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6038         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6039         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6040         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6041         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6042         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6043         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6044         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6045         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6046         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6047         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6048         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6049         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6050         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6051         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6052         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6053         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6054         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6055         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6056         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6057         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6058         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6059         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6060         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6061         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6062         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6063         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6064         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6065         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6066         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6067         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6068         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6069         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6070         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6071         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6072         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6073         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6074         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6075         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6076         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6077         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6078         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6079         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6080         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6081         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6082         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6083         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6084         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6085         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6086         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6087         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6088         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6089         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6090         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6091         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6092         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6093         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6094         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6095         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6096         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6097         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6098         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6099         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6100         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6101         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6102         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6103         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6104         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6105         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6106         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6107         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6108         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6109         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6110         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6111         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6112         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6113         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6114         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6115         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6116         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6117         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6118         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6119         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6120         0x00000000, 0x00000000, 0x00000000,
6121 };
6122
6123 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6124         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6125         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6126         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6127         0x00000000, 0x00000000, 0x00000000,
6128 };
6129
6130 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6131         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6132         0x00000000, 0x00000000, 0x00000000,
6133 };
6134
6135 /* tp->lock is held. */
6136 static int tg3_load_tso_firmware(struct tg3 *tp)
6137 {
6138         struct fw_info info;
6139         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6140         int err, i;
6141
6142         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6143                 return 0;
6144
6145         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6146                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6147                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6148                 info.text_data = &tg3Tso5FwText[0];
6149                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6150                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6151                 info.rodata_data = &tg3Tso5FwRodata[0];
6152                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6153                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6154                 info.data_data = &tg3Tso5FwData[0];
6155                 cpu_base = RX_CPU_BASE;
6156                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6157                 cpu_scratch_size = (info.text_len +
6158                                     info.rodata_len +
6159                                     info.data_len +
6160                                     TG3_TSO5_FW_SBSS_LEN +
6161                                     TG3_TSO5_FW_BSS_LEN);
6162         } else {
6163                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6164                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6165                 info.text_data = &tg3TsoFwText[0];
6166                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6167                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6168                 info.rodata_data = &tg3TsoFwRodata[0];
6169                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6170                 info.data_len = TG3_TSO_FW_DATA_LEN;
6171                 info.data_data = &tg3TsoFwData[0];
6172                 cpu_base = TX_CPU_BASE;
6173                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6174                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6175         }
6176
6177         err = tg3_load_firmware_cpu(tp, cpu_base,
6178                                     cpu_scratch_base, cpu_scratch_size,
6179                                     &info);
6180         if (err)
6181                 return err;
6182
6183         /* Now start up the CPU. */
6184         tw32(cpu_base + CPU_STATE, 0xffffffff);
6185         tw32_f(cpu_base + CPU_PC,    info.text_base);
6186
6187         for (i = 0; i < 5; i++) {
6188                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6189                         break;
6190                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6191                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6192                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6193                 udelay(1000);
6194         }
6195         if (i >= 5) {
6196                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
6197                        "to set CPU PC: is %08x, should be %08x\n",
6198                        tp->dev->name, tr32(cpu_base + CPU_PC),
6199                        info.text_base);
6200                 return -ENODEV;
6201         }
6202         tw32(cpu_base + CPU_STATE, 0xffffffff);
6203         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6204         return 0;
6205 }
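/*
 * Worked numbers for the scratch sizing above (derived from the #defines in
 * this file): on 5705 the image must fit in the mbuf pool at
 * NIC_SRAM_MBUF_POOL_BASE5705, and cpu_scratch_size evaluates to
 * 0xe90 + 0x50 + 0x20 + 0x28 + 0x88 = 0xfb0 bytes.  On the other chips the
 * loader gets the full TX CPU scratch window, TX_CPU_SCRATCH_SIZE (0x4000),
 * which comfortably covers the 0x2414 bytes the TSO image spans
 * ((TG3_TSO_FW_BSS_ADDR & 0xffff) + TG3_TSO_FW_BSS_LEN = 0x1b80 + 0x894).
 */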
6206
6207
6208 /* tp->lock is held. */
6209 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6210 {
6211         u32 addr_high, addr_low;
6212         int i;
6213
6214         addr_high = ((tp->dev->dev_addr[0] << 8) |
6215                      tp->dev->dev_addr[1]);
6216         addr_low = ((tp->dev->dev_addr[2] << 24) |
6217                     (tp->dev->dev_addr[3] << 16) |
6218                     (tp->dev->dev_addr[4] <<  8) |
6219                     (tp->dev->dev_addr[5] <<  0));
6220         for (i = 0; i < 4; i++) {
6221                 if (i == 1 && skip_mac_1)
6222                         continue;
6223                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6224                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6225         }
6226
6227         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6228             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6229                 for (i = 0; i < 12; i++) {
6230                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6231                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6232                 }
6233         }
6234
6235         addr_high = (tp->dev->dev_addr[0] +
6236                      tp->dev->dev_addr[1] +
6237                      tp->dev->dev_addr[2] +
6238                      tp->dev->dev_addr[3] +
6239                      tp->dev->dev_addr[4] +
6240                      tp->dev->dev_addr[5]) &
6241                 TX_BACKOFF_SEED_MASK;
6242         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6243 }
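/*
 * Worked example of the packing above, using an arbitrary example address
 * (not a real assignment) 00:10:18:aa:bb:cc: addr_high = (0x00 << 8) | 0x10
 * = 0x0010 and addr_low = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 * = 0x18aabbcc, i.e. the first two octets go into the HIGH register and the
 * remaining four into the LOW register, most significant octet first.
 */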
6244
6245 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6246 {
6247         struct tg3 *tp = netdev_priv(dev);
6248         struct sockaddr *addr = p;
6249         int err = 0, skip_mac_1 = 0;
6250
6251         if (!is_valid_ether_addr(addr->sa_data))
6252                 return -EINVAL;
6253
6254         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6255
6256         if (!netif_running(dev))
6257                 return 0;
6258
6259         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6260                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6261
6262                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6263                 addr0_low = tr32(MAC_ADDR_0_LOW);
6264                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6265                 addr1_low = tr32(MAC_ADDR_1_LOW);
6266
6267                 /* Skip MAC addr 1 if ASF is using it. */
6268                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6269                     !(addr1_high == 0 && addr1_low == 0))
6270                         skip_mac_1 = 1;
6271         }
6272         spin_lock_bh(&tp->lock);
6273         __tg3_set_mac_addr(tp, skip_mac_1);
6274         spin_unlock_bh(&tp->lock);
6275
6276         return err;
6277 }
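/*
 * Illustrative sketch, not driver code: tg3_set_mac_addr() above has the
 * shape of a net_device set_mac_address handler, so a hypothetical caller
 * (new_mac being a made-up six-byte unicast address) might look like:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = ARPHRD_ETHER;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	err = tg3_set_mac_addr(dev, &sa);
 *
 * The ASF check means MAC address slot 1 is left untouched when the
 * management firmware has claimed it.
 */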
6278
6279 /* tp->lock is held. */
6280 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6281                            dma_addr_t mapping, u32 maxlen_flags,
6282                            u32 nic_addr)
6283 {
6284         tg3_write_mem(tp,
6285                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6286                       ((u64) mapping >> 32));
6287         tg3_write_mem(tp,
6288                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6289                       ((u64) mapping & 0xffffffff));
6290         tg3_write_mem(tp,
6291                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6292                        maxlen_flags);
6293
6294         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6295                 tg3_write_mem(tp,
6296                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6297                               nic_addr);
6298 }
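/*
 * Worked example of the 64-bit split above, with a made-up DMA address of
 * 0x0000000123456780: the HIGH word written is (u64) mapping >> 32 =
 * 0x00000001 and the LOW word is mapping & 0xffffffff = 0x23456780, so the
 * NIC sees the same bus address the DMA API handed back.
 */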
6299
6300 static void __tg3_set_rx_mode(struct net_device *);
6301 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6302 {
6303         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6304         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6305         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6306         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6307         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6308                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6309                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6310         }
6311         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6312         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6313         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6314                 u32 val = ec->stats_block_coalesce_usecs;
6315
6316                 if (!netif_carrier_ok(tp->dev))
6317                         val = 0;
6318
6319                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6320         }
6321 }
6322
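/* Quiesce and reset the chip, then rebuild its entire operating state:
 * descriptor rings and the buffer manager, read/write DMA engines, MAC
 * address and RX/TX MAC blocks, host coalescing, receive rules and, if
 * needed, the 5701 A0 and TSO firmware.  Any failure is returned to the
 * caller unchanged.
 */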
6323 /* tp->lock is held. */
6324 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6325 {
6326         u32 val, rdmac_mode;
6327         int i, err, limit;
6328
6329         tg3_disable_ints(tp);
6330
6331         tg3_stop_fw(tp);
6332
6333         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6334
6335         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6336                 tg3_abort_hw(tp, 1);
6337         }
6338
6339         if (reset_phy)
6340                 tg3_phy_reset(tp);
6341
6342         err = tg3_chip_reset(tp);
6343         if (err)
6344                 return err;
6345
6346         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6347
6348         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6349                 val = tr32(TG3_CPMU_CTRL);
6350                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6351                 tw32(TG3_CPMU_CTRL, val);
6352         }
6353
6354         /* This works around an issue with Athlon chipsets on
6355          * B3 tigon3 silicon.  This bit has no effect on any
6356          * other revision.  But do not set this on PCI Express
6357          * chips and don't even touch the clocks if the CPMU is present.
6358          */
6359         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6360                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6361                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6362                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6363         }
6364
6365         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6366             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6367                 val = tr32(TG3PCI_PCISTATE);
6368                 val |= PCISTATE_RETRY_SAME_DMA;
6369                 tw32(TG3PCI_PCISTATE, val);
6370         }
6371
6372         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6373                 /* Allow reads and writes to the
6374                  * APE register and memory space.
6375                  */
6376                 val = tr32(TG3PCI_PCISTATE);
6377                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6378                        PCISTATE_ALLOW_APE_SHMEM_WR;
6379                 tw32(TG3PCI_PCISTATE, val);
6380         }
6381
6382         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6383                 /* Enable some hw fixes.  */
6384                 val = tr32(TG3PCI_MSI_DATA);
6385                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6386                 tw32(TG3PCI_MSI_DATA, val);
6387         }
6388
6389         /* Descriptor ring init may access the
6390          * NIC SRAM area to set up the TX descriptors, so we
6391          * can only do this after the hardware has been
6392          * successfully reset.
6393          */
6394         err = tg3_init_rings(tp);
6395         if (err)
6396                 return err;
6397
6398         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6399             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6400                 /* This value is determined during the probe time DMA
6401                  * engine test, tg3_test_dma.
6402                  */
6403                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6404         }
6405
6406         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6407                           GRC_MODE_4X_NIC_SEND_RINGS |
6408                           GRC_MODE_NO_TX_PHDR_CSUM |
6409                           GRC_MODE_NO_RX_PHDR_CSUM);
6410         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6411
6412         /* Pseudo-header checksum is done by hardware logic and not
6413          * the offload processors, so make the chip do the pseudo-
6414          * header checksums on receive.  For transmit it is more
6415          * convenient to do the pseudo-header checksum in software,
6416          * as Linux already does that on transmit for us in all cases.
6417          */
6418         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6419
6420         tw32(GRC_MODE,
6421              tp->grc_mode |
6422              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6423
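        /* A prescaler value of 65 with the 66 MHz core clock presumably
         * gives a divide-by-66, i.e. roughly a 1 us timer tick.
         */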
6424         /* Setup the timer prescaler register.  The core clock is always 66 MHz. */
6425         val = tr32(GRC_MISC_CFG);
6426         val &= ~0xff;
6427         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6428         tw32(GRC_MISC_CFG, val);
6429
6430         /* Initialize MBUF/DESC pool. */
6431         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6432                 /* Do nothing.  */
6433         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6434                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6436                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6437                 else
6438                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6439                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6440                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6441         } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
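                /* The 5705 TSO firmware is presumably loaded at the start of
                 * this SRAM region (see tg3_load_tso_firmware), so round its
                 * footprint up to a 128-byte boundary and carve it, plus an
                 * extra 0xa00 bytes, out of the mbuf pool base and size.
                 */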
6443                 int fw_len;
6444
6445                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6446                           TG3_TSO5_FW_RODATA_LEN +
6447                           TG3_TSO5_FW_DATA_LEN +
6448                           TG3_TSO5_FW_SBSS_LEN +
6449                           TG3_TSO5_FW_BSS_LEN);
6450                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6451                 tw32(BUFMGR_MB_POOL_ADDR,
6452                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6453                 tw32(BUFMGR_MB_POOL_SIZE,
6454                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6455         }
6456
6457         if (tp->dev->mtu <= ETH_DATA_LEN) {
6458                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6459                      tp->bufmgr_config.mbuf_read_dma_low_water);
6460                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6461                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6462                 tw32(BUFMGR_MB_HIGH_WATER,
6463                      tp->bufmgr_config.mbuf_high_water);
6464         } else {
6465                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6466                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6467                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6468                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6469                 tw32(BUFMGR_MB_HIGH_WATER,
6470                      tp->bufmgr_config.mbuf_high_water_jumbo);
6471         }
6472         tw32(BUFMGR_DMA_LOW_WATER,
6473              tp->bufmgr_config.dma_low_water);
6474         tw32(BUFMGR_DMA_HIGH_WATER,
6475              tp->bufmgr_config.dma_high_water);
6476
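        /* Enable the buffer manager and poll for up to ~20 ms
         * (2000 x 10 us) for the enable bit to latch; give up with
         * -ENODEV if it never does.
         */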
6477         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6478         for (i = 0; i < 2000; i++) {
6479                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6480                         break;
6481                 udelay(10);
6482         }
6483         if (i >= 2000) {
6484                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6485                        tp->dev->name);
6486                 return -ENODEV;
6487         }
6488
6489         /* Setup replenish threshold. */
6490         val = tp->rx_pending / 8;
6491         if (val == 0)
6492                 val = 1;
6493         else if (val > tp->rx_std_max_post)
6494                 val = tp->rx_std_max_post;
6495         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6496                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6497                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6498
6499                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6500                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6501         }
6502
6503         tw32(RCVBDI_STD_THRESH, val);
6504
6505         /* Initialize TG3_BDINFO's at:
6506          *  RCVDBDI_STD_BD:     standard eth size rx ring
6507          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6508          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6509          *
6510          * like so:
6511          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6512          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6513          *                              ring attribute flags
6514          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6515          *
6516          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6517          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6518          *
6519          * The size of each ring is fixed in the firmware, but the location is
6520          * configurable.
6521          */
6522         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6523              ((u64) tp->rx_std_mapping >> 32));
6524         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6525              ((u64) tp->rx_std_mapping & 0xffffffff));
6526         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6527              NIC_SRAM_RX_BUFFER_DESC);
6528
6529         /* Don't even try to program the JUMBO/MINI buffer descriptor
6530          * configs on 5705.
6531          */
6532         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6533                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6534                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6535         } else {
6536                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6537                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6538
6539                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6540                      BDINFO_FLAGS_DISABLED);
6541
6542                 /* Setup replenish threshold. */
6543                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6544
6545                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6546                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6547                              ((u64) tp->rx_jumbo_mapping >> 32));
6548                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6549                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6550                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6551                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6552                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6553                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6554                 } else {
6555                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6556                              BDINFO_FLAGS_DISABLED);
6557                 }
6558
6559         }
6560
6561         /* There is only one send ring on 5705/5750, no need to explicitly
6562          * disable the others.
6563          */
6564         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6565                 /* Clear out send RCB ring in SRAM. */
6566                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6567                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6568                                       BDINFO_FLAGS_DISABLED);
6569         }
6570
6571         tp->tx_prod = 0;
6572         tp->tx_cons = 0;
6573         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6574         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6575
6576         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6577                        tp->tx_desc_mapping,
6578                        (TG3_TX_RING_SIZE <<
6579                         BDINFO_FLAGS_MAXLEN_SHIFT),
6580                        NIC_SRAM_TX_BUFFER_DESC);
6581
6582         /* There is only one receive return ring on 5705/5750, no need
6583          * to explicitly disable the others.
6584          */
6585         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6586                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6587                      i += TG3_BDINFO_SIZE) {
6588                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6589                                       BDINFO_FLAGS_DISABLED);
6590                 }
6591         }
6592
6593         tp->rx_rcb_ptr = 0;
6594         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6595
6596         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6597                        tp->rx_rcb_mapping,
6598                        (TG3_RX_RCB_RING_SIZE(tp) <<
6599                         BDINFO_FLAGS_MAXLEN_SHIFT),
6600                        0);
6601
6602         tp->rx_std_ptr = tp->rx_pending;
6603         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6604                      tp->rx_std_ptr);
6605
6606         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6607                                                 tp->rx_jumbo_pending : 0;
6608         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6609                      tp->rx_jumbo_ptr);
6610
6611         /* Initialize MAC address and backoff seed. */
6612         __tg3_set_mac_addr(tp, 0);
6613
6614         /* MTU + ethernet header + FCS + optional VLAN tag */
6615         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6616
6617         /* The slot time is changed by tg3_setup_phy if we
6618          * run at gigabit with half duplex.
6619          */
6620         tw32(MAC_TX_LENGTHS,
6621              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6622              (6 << TX_LENGTHS_IPG_SHIFT) |
6623              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6624
6625         /* Receive rules. */
6626         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6627         tw32(RCVLPC_CONFIG, 0x0181);
6628
6629         /* Calculate RDMAC_MODE setting early, we need it to determine
6630          * the RCVLPC_STATE_ENABLE mask.
6631          */
6632         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6633                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6634                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6635                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6636                       RDMAC_MODE_LNGREAD_ENAB);
6637
6638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6639                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6640                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6641                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6642
6643         /* If statement applies to 5705 and 5750 PCI devices only */
6644         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6645              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6646             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6647                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6648                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6649                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6650                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6651                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6652                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6653                 }
6654         }
6655
6656         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6657                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6658
6659         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6660                 rdmac_mode |= (1 << 27);
6661
6662         /* Receive/send statistics. */
6663         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6664                 val = tr32(RCVLPC_STATS_ENABLE);
6665                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6666                 tw32(RCVLPC_STATS_ENABLE, val);
6667         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6668                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6669                 val = tr32(RCVLPC_STATS_ENABLE);
6670                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6671                 tw32(RCVLPC_STATS_ENABLE, val);
6672         } else {
6673                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6674         }
6675         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6676         tw32(SNDDATAI_STATSENAB, 0xffffff);
6677         tw32(SNDDATAI_STATSCTRL,
6678              (SNDDATAI_SCTRL_ENABLE |
6679               SNDDATAI_SCTRL_FASTUPD));
6680
6681         /* Setup host coalescing engine. */
6682         tw32(HOSTCC_MODE, 0);
6683         for (i = 0; i < 2000; i++) {
6684                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6685                         break;
6686                 udelay(10);
6687         }
6688
6689         __tg3_set_coalesce(tp, &tp->coal);
6690
6691         /* set status block DMA address */
6692         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6693              ((u64) tp->status_mapping >> 32));
6694         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6695              ((u64) tp->status_mapping & 0xffffffff));
6696
6697         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6698                 /* Status/statistics block address.  See tg3_timer,
6699                  * the tg3_periodic_fetch_stats call there, and
6700                  * tg3_get_stats to see how this works for 5705/5750 chips.
6701                  */
6702                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6703                      ((u64) tp->stats_mapping >> 32));
6704                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6705                      ((u64) tp->stats_mapping & 0xffffffff));
6706                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6707                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6708         }
6709
6710         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6711
6712         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6713         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6714         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6715                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6716
6717         /* Clear statistics/status block in chip, and status block in ram. */
6718         for (i = NIC_SRAM_STATS_BLK;
6719              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6720              i += sizeof(u32)) {
6721                 tg3_write_mem(tp, i, 0);
6722                 udelay(40);
6723         }
6724         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6725
6726         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6727                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6728                 /* reset to prevent losing 1st rx packet intermittently */
6729                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6730                 udelay(10);
6731         }
6732
6733         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6734                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6735         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6736             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6737             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6738                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6739         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6740         udelay(40);
6741
6742         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6743          * If TG3_FLG2_IS_NIC is zero, we should read the
6744          * register to preserve the GPIO settings for LOMs. The GPIOs,
6745          * whether used as inputs or outputs, are set by boot code after
6746          * reset.
6747          */
6748         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6749                 u32 gpio_mask;
6750
6751                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6752                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6753                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6754
6755                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6756                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6757                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6758
6759                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6760                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6761
6762                 tp->grc_local_ctrl &= ~gpio_mask;
6763                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6764
6765                 /* GPIO1 must be driven high for eeprom write protect */
6766                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6767                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6768                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6769         }
6770         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6771         udelay(100);
6772
6773         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6774         tp->last_tag = 0;
6775
6776         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6777                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6778                 udelay(40);
6779         }
6780
6781         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6782                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6783                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6784                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6785                WDMAC_MODE_LNGREAD_ENAB);
6786
6787         /* If statement applies to 5705 and 5750 PCI devices only */
6788         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6789              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6791                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6792                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6793                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6794                         /* nothing */
6795                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6796                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6797                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6798                         val |= WDMAC_MODE_RX_ACCEL;
6799                 }
6800         }
6801
6802         /* Enable host coalescing bug fix */
6803         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6804             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6805             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6806             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6807                 val |= (1 << 29);
6808
6809         tw32_f(WDMAC_MODE, val);
6810         udelay(40);
6811
6812         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6813                 u16 pcix_cmd;
6814
6815                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6816                                      &pcix_cmd);
6817                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6818                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6819                         pcix_cmd |= PCI_X_CMD_READ_2K;
6820                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6821                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6822                         pcix_cmd |= PCI_X_CMD_READ_2K;
6823                 }
6824                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6825                                       pcix_cmd);
6826         }
6827
6828         tw32_f(RDMAC_MODE, rdmac_mode);
6829         udelay(40);
6830
6831         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6832         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6833                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6834
6835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6836                 tw32(SNDDATAC_MODE,
6837                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6838         else
6839                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6840
6841         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6842         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6843         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6844         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6845         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6846                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6847         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6848         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6849
6850         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6851                 err = tg3_load_5701_a0_firmware_fix(tp);
6852                 if (err)
6853                         return err;
6854         }
6855
6856         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6857                 err = tg3_load_tso_firmware(tp);
6858                 if (err)
6859                         return err;
6860         }
6861
6862         tp->tx_mode = TX_MODE_ENABLE;
6863         tw32_f(MAC_TX_MODE, tp->tx_mode);
6864         udelay(100);
6865
6866         tp->rx_mode = RX_MODE_ENABLE;
6867         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6868             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6869                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6870
6871         tw32_f(MAC_RX_MODE, tp->rx_mode);
6872         udelay(10);
6873
6874         if (tp->link_config.phy_is_low_power) {
6875                 tp->link_config.phy_is_low_power = 0;
6876                 tp->link_config.speed = tp->link_config.orig_speed;
6877                 tp->link_config.duplex = tp->link_config.orig_duplex;
6878                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6879         }
6880
6881         tp->mi_mode = MAC_MI_MODE_BASE;
6882         tw32_f(MAC_MI_MODE, tp->mi_mode);
6883         udelay(80);
6884
6885         tw32(MAC_LED_CTRL, tp->led_ctrl);
6886
6887         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6888         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6889                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6890                 udelay(10);
6891         }
6892         tw32_f(MAC_RX_MODE, tp->rx_mode);
6893         udelay(10);
6894
6895         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6896                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6897                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6898                         /* Set drive transmission level to 1.2V, but only
6899                          * if the signal pre-emphasis bit is not set. */
6900                         val = tr32(MAC_SERDES_CFG);
6901                         val &= 0xfffff000;
6902                         val |= 0x880;
6903                         tw32(MAC_SERDES_CFG, val);
6904                 }
6905                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6906                         tw32(MAC_SERDES_CFG, 0x616000);
6907         }
6908
6909         /* Prevent chip from dropping frames when flow control
6910          * is enabled.
6911          */
6912         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6913
6914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6915             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6916                 /* Use hardware link auto-negotiation */
6917                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6918         }
6919
6920         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6921             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6922                 u32 tmp;
6923
6924                 tmp = tr32(SERDES_RX_CTRL);
6925                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6926                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6927                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6928                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6929         }
6930
6931         err = tg3_setup_phy(tp, 0);
6932         if (err)
6933                 return err;
6934
6935         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6936             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6937                 u32 tmp;
6938
6939                 /* Clear CRC stats. */
6940                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6941                         tg3_writephy(tp, MII_TG3_TEST1,
6942                                      tmp | MII_TG3_TEST1_CRC_EN);
6943                         tg3_readphy(tp, 0x14, &tmp);
6944                 }
6945         }
6946
6947         __tg3_set_rx_mode(tp->dev);
6948
6949         /* Initialize receive rules. */
6950         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6951         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6952         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6953         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6954
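        /* The MAC has sixteen receive rule/value register pairs; only the
         * first eight are used on 5705-class chips outside the 5780 family,
         * and the last four are reserved when ASF firmware is enabled.
         * Rules 0 and 1 were programmed above; the switch below clears the
         * remaining unused slots, deliberately falling through from the
         * highest one down (slots 2 and 3 are intentionally left alone).
         */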
6955         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6956             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6957                 limit = 8;
6958         else
6959                 limit = 16;
6960         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6961                 limit -= 4;
6962         switch (limit) {
6963         case 16:
6964                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6965         case 15:
6966                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6967         case 14:
6968                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6969         case 13:
6970                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6971         case 12:
6972                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6973         case 11:
6974                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6975         case 10:
6976                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6977         case 9:
6978                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6979         case 8:
6980                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6981         case 7:
6982                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6983         case 6:
6984                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6985         case 5:
6986                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6987         case 4:
6988                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6989         case 3:
6990                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6991         case 2:
6992         case 1:
6993
6994         default:
6995                 break;
6996         }
6997
6998         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6999                 /* Write our heartbeat update interval to APE. */
7000                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7001                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7002
7003         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7004
7005         return 0;
7006 }
7007
7008 /* Called at device open time to get the chip ready for
7009  * packet processing.  Invoked with tp->lock held.
7010  */
7011 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7012 {
7013         int err;
7014
7015         /* Force the chip into D0. */
7016         err = tg3_set_power_state(tp, PCI_D0);
7017         if (err)
7018                 goto out;
7019
7020         tg3_switch_clocks(tp);
7021
7022         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7023
7024         err = tg3_reset_hw(tp, reset_phy);
7025
7026 out:
7027         return err;
7028 }
7029
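/* Fold a 32-bit hardware counter into a 64-bit (high/low) software
 * statistic.  Carry is detected via unsigned wrap-around: if the new low
 * word is smaller than the value just added, the addition overflowed and
 * the high word is bumped.  For example, low = 0xffffff00 plus
 * __val = 0x200 leaves low = 0x100, which is < 0x200, so high++.
 */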
7030 #define TG3_STAT_ADD32(PSTAT, REG) \
7031 do {    u32 __val = tr32(REG); \
7032         (PSTAT)->low += __val; \
7033         if ((PSTAT)->low < __val) \
7034                 (PSTAT)->high += 1; \
7035 } while (0)
7036
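/* Mirror the MAC and receive list placement hardware counters into
 * tp->hw_stats.  Called once a second from tg3_timer on 5705-plus chips,
 * which do not DMA a statistics block to host memory (see the
 * HOSTCC_STATS_BLK setup in tg3_reset_hw).  Skipped while the link is
 * down.
 */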
7037 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7038 {
7039         struct tg3_hw_stats *sp = tp->hw_stats;
7040
7041         if (!netif_carrier_ok(tp->dev))
7042                 return;
7043
7044         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7045         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7046         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7047         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7048         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7049         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7050         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7051         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7052         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7053         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7054         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7055         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7056         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7057
7058         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7059         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7060         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7061         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7062         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7063         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7064         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7065         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7066         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7067         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7068         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7069         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7070         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7071         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7072
7073         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7074         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7075         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7076 }
7077
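/* Periodic housekeeping timer, re-armed every tp->timer_offset jiffies.
 * While interrupts are being synchronized (irq_sync) it only re-arms
 * itself.  Otherwise, under tp->lock, it kicks the interrupt/coalescing
 * handshake when running with non-tagged status (and schedules a full
 * reset if the write DMA engine has stopped), does the once-per-second
 * work of fetching stats and re-checking the link, and sends the ASF
 * heartbeat every two seconds.
 */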
7078 static void tg3_timer(unsigned long __opaque)
7079 {
7080         struct tg3 *tp = (struct tg3 *) __opaque;
7081
7082         if (tp->irq_sync)
7083                 goto restart_timer;
7084
7085         spin_lock(&tp->lock);
7086
7087         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7088                 /* All of this garbage is because, when using non-tagged
7089                  * IRQ status, the mailbox/status_block protocol the chip
7090                  * uses with the CPU is race prone.
7091                  */
7092                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7093                         tw32(GRC_LOCAL_CTRL,
7094                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7095                 } else {
7096                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7097                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7098                 }
7099
7100                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7101                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7102                         spin_unlock(&tp->lock);
7103                         schedule_work(&tp->reset_task);
7104                         return;
7105                 }
7106         }
7107
7108         /* This part only runs once per second. */
7109         if (!--tp->timer_counter) {
7110                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7111                         tg3_periodic_fetch_stats(tp);
7112
7113                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7114                         u32 mac_stat;
7115                         int phy_event;
7116
7117                         mac_stat = tr32(MAC_STATUS);
7118
7119                         phy_event = 0;
7120                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7121                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7122                                         phy_event = 1;
7123                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7124                                 phy_event = 1;
7125
7126                         if (phy_event)
7127                                 tg3_setup_phy(tp, 0);
7128                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7129                         u32 mac_stat = tr32(MAC_STATUS);
7130                         int need_setup = 0;
7131
7132                         if (netif_carrier_ok(tp->dev) &&
7133                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7134                                 need_setup = 1;
7135                         }
7136                         if (!netif_carrier_ok(tp->dev) &&
7137                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7138                                          MAC_STATUS_SIGNAL_DET))) {
7139                                 need_setup = 1;
7140                         }
7141                         if (need_setup) {
7142                                 if (!tp->serdes_counter) {
7143                                         tw32_f(MAC_MODE,
7144                                              (tp->mac_mode &
7145                                               ~MAC_MODE_PORT_MODE_MASK));
7146                                         udelay(40);
7147                                         tw32_f(MAC_MODE, tp->mac_mode);
7148                                         udelay(40);
7149                                 }
7150                                 tg3_setup_phy(tp, 0);
7151                         }
7152                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7153                         tg3_serdes_parallel_detect(tp);
7154
7155                 tp->timer_counter = tp->timer_multiplier;
7156         }
7157
7158         /* Heartbeat is only sent once every 2 seconds.
7159          *
7160          * The heartbeat is to tell the ASF firmware that the host
7161          * driver is still alive.  In the event that the OS crashes,
7162          * ASF needs to reset the hardware to free up the FIFO space
7163          * that may be filled with rx packets destined for the host.
7164          * If the FIFO is full, ASF will no longer function properly.
7165          *
7166          * Unintended resets have been reported on real-time kernels
7167          * where the timer doesn't run on time.  Netpoll will also have
7168          * the same problem.
7169          *
7170          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7171          * to check the ring condition when the heartbeat is expiring
7172          * before doing the reset.  This will prevent most unintended
7173          * resets.
7174          */
7175         if (!--tp->asf_counter) {
7176                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7177                         u32 val;
7178
7179                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7180                                       FWCMD_NICDRV_ALIVE3);
7181                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7182                         /* 5 second timeout */
7183                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7184                         val = tr32(GRC_RX_CPU_EVENT);
7185                         val |= (1 << 14);
7186                         tw32(GRC_RX_CPU_EVENT, val);
7187                 }
7188                 tp->asf_counter = tp->asf_multiplier;
7189         }
7190
7191         spin_unlock(&tp->lock);
7192
7193 restart_timer:
7194         tp->timer.expires = jiffies + tp->timer_offset;
7195         add_timer(&tp->timer);
7196 }
7197
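/* Install the interrupt handler that matches the current mode: plain or
 * tagged-status INTx, MSI, or one-shot MSI.  Only the INTx handlers are
 * registered as shared; an MSI vector belongs to this device alone.
 */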
7198 static int tg3_request_irq(struct tg3 *tp)
7199 {
7200         irq_handler_t fn;
7201         unsigned long flags;
7202         struct net_device *dev = tp->dev;
7203
7204         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7205                 fn = tg3_msi;
7206                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7207                         fn = tg3_msi_1shot;
7208                 flags = IRQF_SAMPLE_RANDOM;
7209         } else {
7210                 fn = tg3_interrupt;
7211                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7212                         fn = tg3_interrupt_tagged;
7213                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7214         }
7215         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7216 }
7217
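/* Check that the chip can really deliver an interrupt: temporarily swap
 * in tg3_test_isr, ask the coalescing engine to fire immediately, and
 * poll for up to ~50 ms for evidence that it arrived (a non-zero
 * interrupt mailbox or MISC_HOST_CTRL_MASK_PCI_INT being set).  The
 * normal handler is reinstalled before returning.
 */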
7218 static int tg3_test_interrupt(struct tg3 *tp)
7219 {
7220         struct net_device *dev = tp->dev;
7221         int err, i, intr_ok = 0;
7222
7223         if (!netif_running(dev))
7224                 return -ENODEV;
7225
7226         tg3_disable_ints(tp);
7227
7228         free_irq(tp->pdev->irq, dev);
7229
7230         err = request_irq(tp->pdev->irq, tg3_test_isr,
7231                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7232         if (err)
7233                 return err;
7234
7235         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7236         tg3_enable_ints(tp);
7237
7238         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7239                HOSTCC_MODE_NOW);
7240
7241         for (i = 0; i < 5; i++) {
7242                 u32 int_mbox, misc_host_ctrl;
7243
7244                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7245                                         TG3_64BIT_REG_LOW);
7246                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7247
7248                 if ((int_mbox != 0) ||
7249                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7250                         intr_ok = 1;
7251                         break;
7252                 }
7253
7254                 msleep(10);
7255         }
7256
7257         tg3_disable_ints(tp);
7258
7259         free_irq(tp->pdev->irq, dev);
7260
7261         err = tg3_request_irq(tp);
7262
7263         if (err)
7264                 return err;
7265
7266         if (intr_ok)
7267                 return 0;
7268
7269         return -EIO;
7270 }
7271
7272 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
7273  * INTx mode is successfully restored.
7274  */
7275 static int tg3_test_msi(struct tg3 *tp)
7276 {
7277         struct net_device *dev = tp->dev;
7278         int err;
7279         u16 pci_cmd;
7280
7281         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7282                 return 0;
7283
7284         /* Turn off SERR reporting in case MSI terminates with Master
7285          * Abort.
7286          */
7287         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7288         pci_write_config_word(tp->pdev, PCI_COMMAND,
7289                               pci_cmd & ~PCI_COMMAND_SERR);
7290
7291         err = tg3_test_interrupt(tp);
7292
7293         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7294
7295         if (!err)
7296                 return 0;
7297
7298         /* other failures */
7299         if (err != -EIO)
7300                 return err;
7301
7302         /* MSI test failed, go back to INTx mode */
7303         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7304                "switching to INTx mode. Please report this failure to "
7305                "the PCI maintainer and include system chipset information.\n",
7306                        tp->dev->name);
7307
7308         free_irq(tp->pdev->irq, dev);
7309         pci_disable_msi(tp->pdev);
7310
7311         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7312
7313         err = tg3_request_irq(tp);
7314         if (err)
7315                 return err;
7316
7317         /* Need to reset the chip because the MSI cycle may have terminated
7318          * with Master Abort.
7319          */
7320         tg3_full_lock(tp, 1);
7321
7322         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7323         err = tg3_init_hw(tp, 1);
7324
7325         tg3_full_unlock(tp);
7326
7327         if (err)
7328                 free_irq(tp->pdev->irq, dev);
7329
7330         return err;
7331 }
7332
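/* Bring the interface up: power the chip into D0, allocate the
 * DMA-consistent rings and status/statistics blocks, switch to MSI when
 * the chip supports it (MSI is only used together with tagged status),
 * program the hardware, start the periodic timer, verify MSI delivery
 * with tg3_test_msi (falling back to INTx on failure), then enable
 * interrupts and the transmit queue.
 */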
7333 static int tg3_open(struct net_device *dev)
7334 {
7335         struct tg3 *tp = netdev_priv(dev);
7336         int err;
7337
7338         netif_carrier_off(tp->dev);
7339
7340         tg3_full_lock(tp, 0);
7341
7342         err = tg3_set_power_state(tp, PCI_D0);
7343         if (err) {
7344                 tg3_full_unlock(tp);
7345                 return err;
7346         }
7347
7348         tg3_disable_ints(tp);
7349         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7350
7351         tg3_full_unlock(tp);
7352
7353         /* The placement of this call is tied
7354          * to the setup and use of Host TX descriptors.
7355          */
7356         err = tg3_alloc_consistent(tp);
7357         if (err)
7358                 return err;
7359
7360         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7361                 /* All MSI supporting chips should support tagged
7362                  * status.  Assert that this is the case.
7363                  */
7364                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7365                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7366                                "Not using MSI.\n", tp->dev->name);
7367                 } else if (pci_enable_msi(tp->pdev) == 0) {
7368                         u32 msi_mode;
7369
7370                         /* Hardware bug - MSI won't work if INTX disabled. */
7371                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7372                                 pci_intx(tp->pdev, 1);
7373
7374                         msi_mode = tr32(MSGINT_MODE);
7375                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7376                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7377                 }
7378         }
7379         err = tg3_request_irq(tp);
7380
7381         if (err) {
7382                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7383                         pci_disable_msi(tp->pdev);
7384                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7385                 }
7386                 tg3_free_consistent(tp);
7387                 return err;
7388         }
7389
7390         napi_enable(&tp->napi);
7391
7392         tg3_full_lock(tp, 0);
7393
7394         err = tg3_init_hw(tp, 1);
7395         if (err) {
7396                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7397                 tg3_free_rings(tp);
7398         } else {
7399                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7400                         tp->timer_offset = HZ;
7401                 else
7402                         tp->timer_offset = HZ / 10;
7403
7404                 BUG_ON(tp->timer_offset > HZ);
7405                 tp->timer_counter = tp->timer_multiplier =
7406                         (HZ / tp->timer_offset);
7407                 tp->asf_counter = tp->asf_multiplier =
7408                         ((HZ / tp->timer_offset) * 2);
7409
7410                 init_timer(&tp->timer);
7411                 tp->timer.expires = jiffies + tp->timer_offset;
7412                 tp->timer.data = (unsigned long) tp;
7413                 tp->timer.function = tg3_timer;
7414         }
7415
7416         tg3_full_unlock(tp);
7417
7418         if (err) {
7419                 napi_disable(&tp->napi);
7420                 free_irq(tp->pdev->irq, dev);
7421                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7422                         pci_disable_msi(tp->pdev);
7423                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7424                 }
7425                 tg3_free_consistent(tp);
7426                 return err;
7427         }
7428
7429         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7430                 err = tg3_test_msi(tp);
7431
7432                 if (err) {
7433                         tg3_full_lock(tp, 0);
7434
7435                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7436                                 pci_disable_msi(tp->pdev);
7437                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7438                         }
7439                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7440                         tg3_free_rings(tp);
7441                         tg3_free_consistent(tp);
7442
7443                         tg3_full_unlock(tp);
7444
7445                         napi_disable(&tp->napi);
7446
7447                         return err;
7448                 }
7449
7450                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7451                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7452                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7453
7454                                 tw32(PCIE_TRANSACTION_CFG,
7455                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7456                         }
7457                 }
7458         }
7459
7460         tg3_full_lock(tp, 0);
7461
7462         add_timer(&tp->timer);
7463         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7464         tg3_enable_ints(tp);
7465
7466         tg3_full_unlock(tp);
7467
7468         netif_start_queue(dev);
7469
7470         return 0;
7471 }
7472
7473 #if 0
7474 /*static*/ void tg3_dump_state(struct tg3 *tp)
7475 {
7476         u32 val32, val32_2, val32_3, val32_4, val32_5;
7477         u16 val16;
7478         int i;
7479
7480         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7481         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7482         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7483                val16, val32);
7484
7485         /* MAC block */
7486         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7487                tr32(MAC_MODE), tr32(MAC_STATUS));
7488         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7489                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7490         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7491                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7492         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7493                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7494
7495         /* Send data initiator control block */
7496         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7497                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7498         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7499                tr32(SNDDATAI_STATSCTRL));
7500
7501         /* Send data completion control block */
7502         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7503
7504         /* Send BD ring selector block */
7505         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7506                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7507
7508         /* Send BD initiator control block */
7509         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7510                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7511
7512         /* Send BD completion control block */
7513         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7514
7515         /* Receive list placement control block */
7516         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7517                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7518         printk("       RCVLPC_STATSCTRL[%08x]\n",
7519                tr32(RCVLPC_STATSCTRL));
7520
7521         /* Receive data and receive BD initiator control block */
7522         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7523                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7524
7525         /* Receive data completion control block */
7526         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7527                tr32(RCVDCC_MODE));
7528
7529         /* Receive BD initiator control block */
7530         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7531                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7532
7533         /* Receive BD completion control block */
7534         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7535                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7536
7537         /* Receive list selector control block */
7538         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7539                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7540
7541         /* Mbuf cluster free block */
7542         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7543                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7544
7545         /* Host coalescing control block */
7546         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7547                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7548         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7549                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7550                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7551         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7552                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7553                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7554         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7555                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7556         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7557                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7558
7559         /* Memory arbiter control block */
7560         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7561                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7562
7563         /* Buffer manager control block */
7564         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7565                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7566         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7567                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7568         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7569                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7570                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7571                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7572
7573         /* Read DMA control block */
7574         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7575                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7576
7577         /* Write DMA control block */
7578         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7579                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7580
7581         /* DMA completion block */
7582         printk("DEBUG: DMAC_MODE[%08x]\n",
7583                tr32(DMAC_MODE));
7584
7585         /* GRC block */
7586         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7587                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7588         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7589                tr32(GRC_LOCAL_CTRL));
7590
7591         /* TG3_BDINFOs */
7592         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7593                tr32(RCVDBDI_JUMBO_BD + 0x0),
7594                tr32(RCVDBDI_JUMBO_BD + 0x4),
7595                tr32(RCVDBDI_JUMBO_BD + 0x8),
7596                tr32(RCVDBDI_JUMBO_BD + 0xc));
7597         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7598                tr32(RCVDBDI_STD_BD + 0x0),
7599                tr32(RCVDBDI_STD_BD + 0x4),
7600                tr32(RCVDBDI_STD_BD + 0x8),
7601                tr32(RCVDBDI_STD_BD + 0xc));
7602         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7603                tr32(RCVDBDI_MINI_BD + 0x0),
7604                tr32(RCVDBDI_MINI_BD + 0x4),
7605                tr32(RCVDBDI_MINI_BD + 0x8),
7606                tr32(RCVDBDI_MINI_BD + 0xc));
7607
7608         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7609         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7610         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7611         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7612         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7613                val32, val32_2, val32_3, val32_4);
7614
7615         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7616         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7617         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7618         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7619         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7620                val32, val32_2, val32_3, val32_4);
7621
7622         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7623         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7624         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7625         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7626         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7627         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7628                val32, val32_2, val32_3, val32_4, val32_5);
7629
7630         /* SW status block */
7631         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7632                tp->hw_status->status,
7633                tp->hw_status->status_tag,
7634                tp->hw_status->rx_jumbo_consumer,
7635                tp->hw_status->rx_consumer,
7636                tp->hw_status->rx_mini_consumer,
7637                tp->hw_status->idx[0].rx_producer,
7638                tp->hw_status->idx[0].tx_consumer);
7639
7640         /* SW statistics block */
7641         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7642                ((u32 *)tp->hw_stats)[0],
7643                ((u32 *)tp->hw_stats)[1],
7644                ((u32 *)tp->hw_stats)[2],
7645                ((u32 *)tp->hw_stats)[3]);
7646
7647         /* Mailboxes */
7648         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7649                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7650                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7651                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7652                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7653
7654         /* NIC side send descriptors. */
7655         for (i = 0; i < 6; i++) {
7656                 unsigned long txd;
7657
7658                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7659                         + (i * sizeof(struct tg3_tx_buffer_desc));
7660                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7661                        i,
7662                        readl(txd + 0x0), readl(txd + 0x4),
7663                        readl(txd + 0x8), readl(txd + 0xc));
7664         }
7665
7666         /* NIC side RX descriptors. */
7667         for (i = 0; i < 6; i++) {
7668                 unsigned long rxd;
7669
7670                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7671                         + (i * sizeof(struct tg3_rx_buffer_desc));
7672                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7673                        i,
7674                        readl(rxd + 0x0), readl(rxd + 0x4),
7675                        readl(rxd + 0x8), readl(rxd + 0xc));
7676                 rxd += (4 * sizeof(u32));
7677                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7678                        i,
7679                        readl(rxd + 0x0), readl(rxd + 0x4),
7680                        readl(rxd + 0x8), readl(rxd + 0xc));
7681         }
7682
7683         for (i = 0; i < 6; i++) {
7684                 unsigned long rxd;
7685
7686                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7687                         + (i * sizeof(struct tg3_rx_buffer_desc));
7688                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7689                        i,
7690                        readl(rxd + 0x0), readl(rxd + 0x4),
7691                        readl(rxd + 0x8), readl(rxd + 0xc));
7692                 rxd += (4 * sizeof(u32));
7693                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7694                        i,
7695                        readl(rxd + 0x0), readl(rxd + 0x4),
7696                        readl(rxd + 0x8), readl(rxd + 0xc));
7697         }
7698 }
7699 #endif
7700
7701 static struct net_device_stats *tg3_get_stats(struct net_device *);
7702 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7703
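/*
 * Bring the interface fully down.  Roughly: stop NAPI and any queued
 * reset work, stop the TX queue and the periodic timer, halt the chip
 * under the full lock, free the rings and the IRQ, snapshot the
 * counters so they survive the next open, release the DMA memory and
 * drop the device into D3hot.
 */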
7704 static int tg3_close(struct net_device *dev)
7705 {
7706         struct tg3 *tp = netdev_priv(dev);
7707
7708         napi_disable(&tp->napi);
7709         cancel_work_sync(&tp->reset_task);
7710
7711         netif_stop_queue(dev);
7712
7713         del_timer_sync(&tp->timer);
7714
7715         tg3_full_lock(tp, 1);
7716 #if 0
7717         tg3_dump_state(tp);
7718 #endif
7719
7720         tg3_disable_ints(tp);
7721
7722         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7723         tg3_free_rings(tp);
7724         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7725
7726         tg3_full_unlock(tp);
7727
7728         free_irq(tp->pdev->irq, dev);
7729         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7730                 pci_disable_msi(tp->pdev);
7731                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7732         }
7733
7734         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7735                sizeof(tp->net_stats_prev));
7736         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7737                sizeof(tp->estats_prev));
7738
7739         tg3_free_consistent(tp);
7740
7741         tg3_set_power_state(tp, PCI_D3hot);
7742
7743         netif_carrier_off(tp->dev);
7744
7745         return 0;
7746 }
7747
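/*
 * Hardware counters live in 64-bit high/low pairs (tg3_stat64_t).  On
 * 64-bit kernels the full value is returned; on 32-bit kernels only
 * the low word fits into an unsigned long, so the high word is dropped.
 */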
7748 static inline unsigned long get_stat64(tg3_stat64_t *val)
7749 {
7750         unsigned long ret;
7751
7752 #if (BITS_PER_LONG == 32)
7753         ret = val->low;
7754 #else
7755         ret = ((u64)val->high << 32) | ((u64)val->low);
7756 #endif
7757         return ret;
7758 }
7759
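/*
 * On 5700/5701 with a copper PHY the CRC error count is taken from the
 * PHY rather than from the MAC statistics block (presumably because
 * the MAC counter is not reliable there): enable the PHY CRC counter
 * via MII_TG3_TEST1, read the latched count from PHY register 0x14,
 * and accumulate it in software in tp->phy_crc_errors.
 */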
7760 static unsigned long calc_crc_errors(struct tg3 *tp)
7761 {
7762         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7763
7764         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7765             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7766              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7767                 u32 val;
7768
7769                 spin_lock_bh(&tp->lock);
7770                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7771                         tg3_writephy(tp, MII_TG3_TEST1,
7772                                      val | MII_TG3_TEST1_CRC_EN);
7773                         tg3_readphy(tp, 0x14, &val);
7774                 } else
7775                         val = 0;
7776                 spin_unlock_bh(&tp->lock);
7777
7778                 tp->phy_crc_errors += val;
7779
7780                 return tp->phy_crc_errors;
7781         }
7782
7783         return get_stat64(&hw_stats->rx_fcs_errors);
7784 }
7785
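/*
 * ethtool statistics are cumulative across chip resets: each field is
 * the total saved at the last close (tp->estats_prev) plus the value
 * currently in the hardware statistics block.  ESTAT_ADD() below does
 * that for a single member.
 */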
7786 #define ESTAT_ADD(member) \
7787         estats->member =        old_estats->member + \
7788                                 get_stat64(&hw_stats->member)
7789
7790 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7791 {
7792         struct tg3_ethtool_stats *estats = &tp->estats;
7793         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7794         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7795
7796         if (!hw_stats)
7797                 return old_estats;
7798
7799         ESTAT_ADD(rx_octets);
7800         ESTAT_ADD(rx_fragments);
7801         ESTAT_ADD(rx_ucast_packets);
7802         ESTAT_ADD(rx_mcast_packets);
7803         ESTAT_ADD(rx_bcast_packets);
7804         ESTAT_ADD(rx_fcs_errors);
7805         ESTAT_ADD(rx_align_errors);
7806         ESTAT_ADD(rx_xon_pause_rcvd);
7807         ESTAT_ADD(rx_xoff_pause_rcvd);
7808         ESTAT_ADD(rx_mac_ctrl_rcvd);
7809         ESTAT_ADD(rx_xoff_entered);
7810         ESTAT_ADD(rx_frame_too_long_errors);
7811         ESTAT_ADD(rx_jabbers);
7812         ESTAT_ADD(rx_undersize_packets);
7813         ESTAT_ADD(rx_in_length_errors);
7814         ESTAT_ADD(rx_out_length_errors);
7815         ESTAT_ADD(rx_64_or_less_octet_packets);
7816         ESTAT_ADD(rx_65_to_127_octet_packets);
7817         ESTAT_ADD(rx_128_to_255_octet_packets);
7818         ESTAT_ADD(rx_256_to_511_octet_packets);
7819         ESTAT_ADD(rx_512_to_1023_octet_packets);
7820         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7821         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7822         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7823         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7824         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7825
7826         ESTAT_ADD(tx_octets);
7827         ESTAT_ADD(tx_collisions);
7828         ESTAT_ADD(tx_xon_sent);
7829         ESTAT_ADD(tx_xoff_sent);
7830         ESTAT_ADD(tx_flow_control);
7831         ESTAT_ADD(tx_mac_errors);
7832         ESTAT_ADD(tx_single_collisions);
7833         ESTAT_ADD(tx_mult_collisions);
7834         ESTAT_ADD(tx_deferred);
7835         ESTAT_ADD(tx_excessive_collisions);
7836         ESTAT_ADD(tx_late_collisions);
7837         ESTAT_ADD(tx_collide_2times);
7838         ESTAT_ADD(tx_collide_3times);
7839         ESTAT_ADD(tx_collide_4times);
7840         ESTAT_ADD(tx_collide_5times);
7841         ESTAT_ADD(tx_collide_6times);
7842         ESTAT_ADD(tx_collide_7times);
7843         ESTAT_ADD(tx_collide_8times);
7844         ESTAT_ADD(tx_collide_9times);
7845         ESTAT_ADD(tx_collide_10times);
7846         ESTAT_ADD(tx_collide_11times);
7847         ESTAT_ADD(tx_collide_12times);
7848         ESTAT_ADD(tx_collide_13times);
7849         ESTAT_ADD(tx_collide_14times);
7850         ESTAT_ADD(tx_collide_15times);
7851         ESTAT_ADD(tx_ucast_packets);
7852         ESTAT_ADD(tx_mcast_packets);
7853         ESTAT_ADD(tx_bcast_packets);
7854         ESTAT_ADD(tx_carrier_sense_errors);
7855         ESTAT_ADD(tx_discards);
7856         ESTAT_ADD(tx_errors);
7857
7858         ESTAT_ADD(dma_writeq_full);
7859         ESTAT_ADD(dma_write_prioq_full);
7860         ESTAT_ADD(rxbds_empty);
7861         ESTAT_ADD(rx_discards);
7862         ESTAT_ADD(rx_errors);
7863         ESTAT_ADD(rx_threshold_hit);
7864
7865         ESTAT_ADD(dma_readq_full);
7866         ESTAT_ADD(dma_read_prioq_full);
7867         ESTAT_ADD(tx_comp_queue_full);
7868
7869         ESTAT_ADD(ring_set_send_prod_index);
7870         ESTAT_ADD(ring_status_update);
7871         ESTAT_ADD(nic_irqs);
7872         ESTAT_ADD(nic_avoided_irqs);
7873         ESTAT_ADD(nic_tx_threshold_hit);
7874
7875         return estats;
7876 }
7877
7878 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7879 {
7880         struct tg3 *tp = netdev_priv(dev);
7881         struct net_device_stats *stats = &tp->net_stats;
7882         struct net_device_stats *old_stats = &tp->net_stats_prev;
7883         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7884
7885         if (!hw_stats)
7886                 return old_stats;
7887
7888         stats->rx_packets = old_stats->rx_packets +
7889                 get_stat64(&hw_stats->rx_ucast_packets) +
7890                 get_stat64(&hw_stats->rx_mcast_packets) +
7891                 get_stat64(&hw_stats->rx_bcast_packets);
7892
7893         stats->tx_packets = old_stats->tx_packets +
7894                 get_stat64(&hw_stats->tx_ucast_packets) +
7895                 get_stat64(&hw_stats->tx_mcast_packets) +
7896                 get_stat64(&hw_stats->tx_bcast_packets);
7897
7898         stats->rx_bytes = old_stats->rx_bytes +
7899                 get_stat64(&hw_stats->rx_octets);
7900         stats->tx_bytes = old_stats->tx_bytes +
7901                 get_stat64(&hw_stats->tx_octets);
7902
7903         stats->rx_errors = old_stats->rx_errors +
7904                 get_stat64(&hw_stats->rx_errors);
7905         stats->tx_errors = old_stats->tx_errors +
7906                 get_stat64(&hw_stats->tx_errors) +
7907                 get_stat64(&hw_stats->tx_mac_errors) +
7908                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7909                 get_stat64(&hw_stats->tx_discards);
7910
7911         stats->multicast = old_stats->multicast +
7912                 get_stat64(&hw_stats->rx_mcast_packets);
7913         stats->collisions = old_stats->collisions +
7914                 get_stat64(&hw_stats->tx_collisions);
7915
7916         stats->rx_length_errors = old_stats->rx_length_errors +
7917                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7918                 get_stat64(&hw_stats->rx_undersize_packets);
7919
7920         stats->rx_over_errors = old_stats->rx_over_errors +
7921                 get_stat64(&hw_stats->rxbds_empty);
7922         stats->rx_frame_errors = old_stats->rx_frame_errors +
7923                 get_stat64(&hw_stats->rx_align_errors);
7924         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7925                 get_stat64(&hw_stats->tx_discards);
7926         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7927                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7928
7929         stats->rx_crc_errors = old_stats->rx_crc_errors +
7930                 calc_crc_errors(tp);
7931
7932         stats->rx_missed_errors = old_stats->rx_missed_errors +
7933                 get_stat64(&hw_stats->rx_discards);
7934
7935         return stats;
7936 }
7937
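/*
 * Bit-at-a-time CRC-32 over 'len' bytes using the reflected Ethernet
 * polynomial 0xedb88320, seeded with all ones and complemented on
 * return.  It is only used to build the multicast hash filter, so the
 * slow loop is fine.  Illustrative use (hypothetical address):
 *
 *      u8 mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *      u32 bit = ~calc_crc(mc, ETH_ALEN) & 0x7f;   selects one of 128 filter bits
 */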
7938 static inline u32 calc_crc(unsigned char *buf, int len)
7939 {
7940         u32 reg;
7941         u32 tmp;
7942         int j, k;
7943
7944         reg = 0xffffffff;
7945
7946         for (j = 0; j < len; j++) {
7947                 reg ^= buf[j];
7948
7949                 for (k = 0; k < 8; k++) {
7950                         tmp = reg & 0x01;
7951
7952                         reg >>= 1;
7953
7954                         if (tmp) {
7955                                 reg ^= 0xedb88320;
7956                         }
7957                 }
7958         }
7959
7960         return ~reg;
7961 }
7962
7963 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7964 {
7965         /* accept or reject all multicast frames */
7966         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7967         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7968         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7969         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7970 }
7971
7972 static void __tg3_set_rx_mode(struct net_device *dev)
7973 {
7974         struct tg3 *tp = netdev_priv(dev);
7975         u32 rx_mode;
7976
7977         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7978                                   RX_MODE_KEEP_VLAN_TAG);
7979
7980         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7981          * flag clear.
7982          */
7983 #if TG3_VLAN_TAG_USED
7984         if (!tp->vlgrp &&
7985             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7986                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7987 #else
7988         /* By definition, VLAN is always disabled in this
7989          * case.
7990          */
7991         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7992                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7993 #endif
7994
7995         if (dev->flags & IFF_PROMISC) {
7996                 /* Promiscuous mode. */
7997                 rx_mode |= RX_MODE_PROMISC;
7998         } else if (dev->flags & IFF_ALLMULTI) {
7999                 /* Accept all multicast. */
8000                 tg3_set_multi(tp, 1);
8001         } else if (dev->mc_count < 1) {
8002                 /* Reject all multicast. */
8003                 tg3_set_multi(tp, 0);
8004         } else {
8005                 /* Accept one or more multicast(s). */
8006                 struct dev_mc_list *mclist;
8007                 unsigned int i;
8008                 u32 mc_filter[4] = { 0, };
8009                 u32 regidx;
8010                 u32 bit;
8011                 u32 crc;
8012
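                /* Hash each address into a 128-bit filter: the low
                 * 7 bits of the complemented CRC select one of 128
                 * bits held in four 32-bit words.  Bits 6:5 pick the
                 * hash register (MAC_HASH_REG_0..3), bits 4:0 the bit
                 * within it.
                 */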
8013                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8014                      i++, mclist = mclist->next) {
8015
8016                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8017                         bit = ~crc & 0x7f;
8018                         regidx = (bit & 0x60) >> 5;
8019                         bit &= 0x1f;
8020                         mc_filter[regidx] |= (1 << bit);
8021                 }
8022
8023                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8024                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8025                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8026                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8027         }
8028
8029         if (rx_mode != tp->rx_mode) {
8030                 tp->rx_mode = rx_mode;
8031                 tw32_f(MAC_RX_MODE, rx_mode);
8032                 udelay(10);
8033         }
8034 }
8035
8036 static void tg3_set_rx_mode(struct net_device *dev)
8037 {
8038         struct tg3 *tp = netdev_priv(dev);
8039
8040         if (!netif_running(dev))
8041                 return;
8042
8043         tg3_full_lock(tp, 0);
8044         __tg3_set_rx_mode(dev);
8045         tg3_full_unlock(tp);
8046 }
8047
8048 #define TG3_REGDUMP_LEN         (32 * 1024)
8049
8050 static int tg3_get_regs_len(struct net_device *dev)
8051 {
8052         return TG3_REGDUMP_LEN;
8053 }
8054
8055 static void tg3_get_regs(struct net_device *dev,
8056                 struct ethtool_regs *regs, void *_p)
8057 {
8058         u32 *p = _p;
8059         struct tg3 *tp = netdev_priv(dev);
8060         u8 *orig_p = _p;
8061         int i;
8062
8063         regs->version = 0;
8064
8065         memset(p, 0, TG3_REGDUMP_LEN);
8066
8067         if (tp->link_config.phy_is_low_power)
8068                 return;
8069
8070         tg3_full_lock(tp, 0);
8071
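/*
 * The ethtool register dump mirrors the register address space: every
 * value is stored at its own hardware offset inside the 32K buffer,
 * which is why 'p' is re-seeked to orig_p + base (or reg) before each
 * run of reads.  Offsets that are never read stay zero from the
 * memset() above.
 */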
8072 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8073 #define GET_REG32_LOOP(base,len)                \
8074 do {    p = (u32 *)(orig_p + (base));           \
8075         for (i = 0; i < len; i += 4)            \
8076                 __GET_REG32((base) + i);        \
8077 } while (0)
8078 #define GET_REG32_1(reg)                        \
8079 do {    p = (u32 *)(orig_p + (reg));            \
8080         __GET_REG32((reg));                     \
8081 } while (0)
8082
8083         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8084         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8085         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8086         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8087         GET_REG32_1(SNDDATAC_MODE);
8088         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8089         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8090         GET_REG32_1(SNDBDC_MODE);
8091         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8092         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8093         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8094         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8095         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8096         GET_REG32_1(RCVDCC_MODE);
8097         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8098         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8099         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8100         GET_REG32_1(MBFREE_MODE);
8101         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8102         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8103         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8104         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8105         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8106         GET_REG32_1(RX_CPU_MODE);
8107         GET_REG32_1(RX_CPU_STATE);
8108         GET_REG32_1(RX_CPU_PGMCTR);
8109         GET_REG32_1(RX_CPU_HWBKPT);
8110         GET_REG32_1(TX_CPU_MODE);
8111         GET_REG32_1(TX_CPU_STATE);
8112         GET_REG32_1(TX_CPU_PGMCTR);
8113         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8114         GET_REG32_LOOP(FTQ_RESET, 0x120);
8115         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8116         GET_REG32_1(DMAC_MODE);
8117         GET_REG32_LOOP(GRC_MODE, 0x4c);
8118         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8119                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8120
8121 #undef __GET_REG32
8122 #undef GET_REG32_LOOP
8123 #undef GET_REG32_1
8124
8125         tg3_full_unlock(tp);
8126 }
8127
8128 static int tg3_get_eeprom_len(struct net_device *dev)
8129 {
8130         struct tg3 *tp = netdev_priv(dev);
8131
8132         return tp->nvram_size;
8133 }
8134
8135 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8136 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8137
8138 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8139 {
8140         struct tg3 *tp = netdev_priv(dev);
8141         int ret;
8142         u8  *pd;
8143         u32 i, offset, len, val, b_offset, b_count;
8144
8145         if (tp->link_config.phy_is_low_power)
8146                 return -EAGAIN;
8147
8148         offset = eeprom->offset;
8149         len = eeprom->len;
8150         eeprom->len = 0;
8151
8152         eeprom->magic = TG3_EEPROM_MAGIC;
8153
8154         if (offset & 3) {
8155                 /* adjust to start on the required 4-byte boundary */
8156                 b_offset = offset & 3;
8157                 b_count = 4 - b_offset;
8158                 if (b_count > len) {
8159                         /* i.e. offset=1 len=2 */
8160                         b_count = len;
8161                 }
8162                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
8163                 if (ret)
8164                         return ret;
8165                 val = cpu_to_le32(val);
8166                 memcpy(data, ((char*)&val) + b_offset, b_count);
8167                 len -= b_count;
8168                 offset += b_count;
8169                 eeprom->len += b_count;
8170         }
8171
8172         /* read bytes up to the last 4-byte boundary */
8173         pd = &data[eeprom->len];
8174         for (i = 0; i < (len - (len & 3)); i += 4) {
8175                 ret = tg3_nvram_read(tp, offset + i, &val);
8176                 if (ret) {
8177                         eeprom->len += i;
8178                         return ret;
8179                 }
8180                 val = cpu_to_le32(val);
8181                 memcpy(pd + i, &val, 4);
8182         }
8183         eeprom->len += i;
8184
8185         if (len & 3) {
8186                 /* read the last bytes, which do not end on a 4-byte boundary */
8187                 pd = &data[eeprom->len];
8188                 b_count = len & 3;
8189                 b_offset = offset + len - b_count;
8190                 ret = tg3_nvram_read(tp, b_offset, &val);
8191                 if (ret)
8192                         return ret;
8193                 val = cpu_to_le32(val);
8194                 memcpy(pd, ((char*)&val), b_count);
8195                 eeprom->len += b_count;
8196         }
8197         return 0;
8198 }
8199
8200 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8201
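/*
 * NVRAM writes must be whole 4-byte aligned words.  For an unaligned
 * or odd-length request the leading and/or trailing word is read back
 * first, the caller's bytes are merged into a temporary buffer, and
 * the padded buffer is written with a single tg3_nvram_write_block().
 */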
8202 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8203 {
8204         struct tg3 *tp = netdev_priv(dev);
8205         int ret;
8206         u32 offset, len, b_offset, odd_len, start, end;
8207         u8 *buf;
8208
8209         if (tp->link_config.phy_is_low_power)
8210                 return -EAGAIN;
8211
8212         if (eeprom->magic != TG3_EEPROM_MAGIC)
8213                 return -EINVAL;
8214
8215         offset = eeprom->offset;
8216         len = eeprom->len;
8217
8218         if ((b_offset = (offset & 3))) {
8219                 /* adjust to start on the required 4-byte boundary */
8220                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
8221                 if (ret)
8222                         return ret;
8223                 start = cpu_to_le32(start);
8224                 len += b_offset;
8225                 offset &= ~3;
8226                 if (len < 4)
8227                         len = 4;
8228         }
8229
8230         odd_len = 0;
8231         if (len & 3) {
8232                 /* adjust to end on the required 4-byte boundary */
8233                 odd_len = 1;
8234                 len = (len + 3) & ~3;
8235                 ret = tg3_nvram_read(tp, offset+len-4, &end);
8236                 if (ret)
8237                         return ret;
8238                 end = cpu_to_le32(end);
8239         }
8240
8241         buf = data;
8242         if (b_offset || odd_len) {
8243                 buf = kmalloc(len, GFP_KERNEL);
8244                 if (!buf)
8245                         return -ENOMEM;
8246                 if (b_offset)
8247                         memcpy(buf, &start, 4);
8248                 if (odd_len)
8249                         memcpy(buf+len-4, &end, 4);
8250                 memcpy(buf + b_offset, data, eeprom->len);
8251         }
8252
8253         ret = tg3_nvram_write_block(tp, offset, len, buf);
8254
8255         if (buf != data)
8256                 kfree(buf);
8257
8258         return ret;
8259 }
8260
8261 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8262 {
8263         struct tg3 *tp = netdev_priv(dev);
8264
8265         cmd->supported = (SUPPORTED_Autoneg);
8266
8267         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8268                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8269                                    SUPPORTED_1000baseT_Full);
8270
8271         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8272                 cmd->supported |= (SUPPORTED_100baseT_Half |
8273                                   SUPPORTED_100baseT_Full |
8274                                   SUPPORTED_10baseT_Half |
8275                                   SUPPORTED_10baseT_Full |
8276                                   SUPPORTED_MII);
8277                 cmd->port = PORT_TP;
8278         } else {
8279                 cmd->supported |= SUPPORTED_FIBRE;
8280                 cmd->port = PORT_FIBRE;
8281         }
8282
8283         cmd->advertising = tp->link_config.advertising;
8284         if (netif_running(dev)) {
8285                 cmd->speed = tp->link_config.active_speed;
8286                 cmd->duplex = tp->link_config.active_duplex;
8287         }
8288         cmd->phy_address = PHY_ADDR;
8289         cmd->transceiver = 0;
8290         cmd->autoneg = tp->link_config.autoneg;
8291         cmd->maxtxpkt = 0;
8292         cmd->maxrxpkt = 0;
8293         return 0;
8294 }
8295
8296 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8297 {
8298         struct tg3 *tp = netdev_priv(dev);
8299
8300         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8301                 /* These are the only advertisement bits allowed.  */
8302                 if (cmd->autoneg == AUTONEG_ENABLE &&
8303                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8304                                           ADVERTISED_1000baseT_Full |
8305                                           ADVERTISED_Autoneg |
8306                                           ADVERTISED_FIBRE)))
8307                         return -EINVAL;
8308                 /* Fiber can only do SPEED_1000.  */
8309                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8310                          (cmd->speed != SPEED_1000))
8311                         return -EINVAL;
8312         /* Copper cannot force SPEED_1000.  */
8313         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8314                    (cmd->speed == SPEED_1000))
8315                 return -EINVAL;
8316         else if ((cmd->speed == SPEED_1000) &&
8317                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8318                 return -EINVAL;
8319
8320         tg3_full_lock(tp, 0);
8321
8322         tp->link_config.autoneg = cmd->autoneg;
8323         if (cmd->autoneg == AUTONEG_ENABLE) {
8324                 tp->link_config.advertising = (cmd->advertising |
8325                                               ADVERTISED_Autoneg);
8326                 tp->link_config.speed = SPEED_INVALID;
8327                 tp->link_config.duplex = DUPLEX_INVALID;
8328         } else {
8329                 tp->link_config.advertising = 0;
8330                 tp->link_config.speed = cmd->speed;
8331                 tp->link_config.duplex = cmd->duplex;
8332         }
8333
8334         tp->link_config.orig_speed = tp->link_config.speed;
8335         tp->link_config.orig_duplex = tp->link_config.duplex;
8336         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8337
8338         if (netif_running(dev))
8339                 tg3_setup_phy(tp, 1);
8340
8341         tg3_full_unlock(tp);
8342
8343         return 0;
8344 }
8345
8346 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8347 {
8348         struct tg3 *tp = netdev_priv(dev);
8349
8350         strcpy(info->driver, DRV_MODULE_NAME);
8351         strcpy(info->version, DRV_MODULE_VERSION);
8352         strcpy(info->fw_version, tp->fw_ver);
8353         strcpy(info->bus_info, pci_name(tp->pdev));
8354 }
8355
8356 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8357 {
8358         struct tg3 *tp = netdev_priv(dev);
8359
8360         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8361                 wol->supported = WAKE_MAGIC;
8362         else
8363                 wol->supported = 0;
8364         wol->wolopts = 0;
8365         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8366                 wol->wolopts = WAKE_MAGIC;
8367         memset(&wol->sopass, 0, sizeof(wol->sopass));
8368 }
8369
8370 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8371 {
8372         struct tg3 *tp = netdev_priv(dev);
8373
8374         if (wol->wolopts & ~WAKE_MAGIC)
8375                 return -EINVAL;
8376         if ((wol->wolopts & WAKE_MAGIC) &&
8377             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8378                 return -EINVAL;
8379
8380         spin_lock_bh(&tp->lock);
8381         if (wol->wolopts & WAKE_MAGIC)
8382                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8383         else
8384                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8385         spin_unlock_bh(&tp->lock);
8386
8387         return 0;
8388 }
8389
8390 static u32 tg3_get_msglevel(struct net_device *dev)
8391 {
8392         struct tg3 *tp = netdev_priv(dev);
8393         return tp->msg_enable;
8394 }
8395
8396 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8397 {
8398         struct tg3 *tp = netdev_priv(dev);
8399         tp->msg_enable = value;
8400 }
8401
8402 static int tg3_set_tso(struct net_device *dev, u32 value)
8403 {
8404         struct tg3 *tp = netdev_priv(dev);
8405
8406         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8407                 if (value)
8408                         return -EINVAL;
8409                 return 0;
8410         }
8411         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8412             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8413                 if (value) {
8414                         dev->features |= NETIF_F_TSO6;
8415                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8416                                 dev->features |= NETIF_F_TSO_ECN;
8417                 } else
8418                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8419         }
8420         return ethtool_op_set_tso(dev, value);
8421 }
8422
8423 static int tg3_nway_reset(struct net_device *dev)
8424 {
8425         struct tg3 *tp = netdev_priv(dev);
8426         u32 bmcr;
8427         int r;
8428
8429         if (!netif_running(dev))
8430                 return -EAGAIN;
8431
8432         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8433                 return -EINVAL;
8434
8435         spin_lock_bh(&tp->lock);
8436         r = -EINVAL;
8437         tg3_readphy(tp, MII_BMCR, &bmcr);
8438         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8439             ((bmcr & BMCR_ANENABLE) ||
8440              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8441                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8442                                            BMCR_ANENABLE);
8443                 r = 0;
8444         }
8445         spin_unlock_bh(&tp->lock);
8446
8447         return r;
8448 }
8449
8450 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8451 {
8452         struct tg3 *tp = netdev_priv(dev);
8453
8454         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8455         ering->rx_mini_max_pending = 0;
8456         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8457                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8458         else
8459                 ering->rx_jumbo_max_pending = 0;
8460
8461         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8462
8463         ering->rx_pending = tp->rx_pending;
8464         ering->rx_mini_pending = 0;
8465         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8466                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8467         else
8468                 ering->rx_jumbo_pending = 0;
8469
8470         ering->tx_pending = tp->tx_pending;
8471 }
8472
8473 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8474 {
8475         struct tg3 *tp = netdev_priv(dev);
8476         int irq_sync = 0, err = 0;
8477
8478         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8479             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8480             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8481             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8482             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8483              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8484                 return -EINVAL;
8485
8486         if (netif_running(dev)) {
8487                 tg3_netif_stop(tp);
8488                 irq_sync = 1;
8489         }
8490
8491         tg3_full_lock(tp, irq_sync);
8492
8493         tp->rx_pending = ering->rx_pending;
8494
8495         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8496             tp->rx_pending > 63)
8497                 tp->rx_pending = 63;
8498         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8499         tp->tx_pending = ering->tx_pending;
8500
8501         if (netif_running(dev)) {
8502                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8503                 err = tg3_restart_hw(tp, 1);
8504                 if (!err)
8505                         tg3_netif_start(tp);
8506         }
8507
8508         tg3_full_unlock(tp);
8509
8510         return err;
8511 }
8512
8513 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8514 {
8515         struct tg3 *tp = netdev_priv(dev);
8516
8517         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8518         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8519         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8520 }
8521
8522 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8523 {
8524         struct tg3 *tp = netdev_priv(dev);
8525         int irq_sync = 0, err = 0;
8526
8527         if (netif_running(dev)) {
8528                 tg3_netif_stop(tp);
8529                 irq_sync = 1;
8530         }
8531
8532         tg3_full_lock(tp, irq_sync);
8533
8534         if (epause->autoneg)
8535                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8536         else
8537                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8538         if (epause->rx_pause)
8539                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8540         else
8541                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8542         if (epause->tx_pause)
8543                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8544         else
8545                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8546
8547         if (netif_running(dev)) {
8548                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8549                 err = tg3_restart_hw(tp, 1);
8550                 if (!err)
8551                         tg3_netif_start(tp);
8552         }
8553
8554         tg3_full_unlock(tp);
8555
8556         return err;
8557 }
8558
8559 static u32 tg3_get_rx_csum(struct net_device *dev)
8560 {
8561         struct tg3 *tp = netdev_priv(dev);
8562         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8563 }
8564
8565 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8566 {
8567         struct tg3 *tp = netdev_priv(dev);
8568
8569         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8570                 if (data != 0)
8571                         return -EINVAL;
8572                 return 0;
8573         }
8574
8575         spin_lock_bh(&tp->lock);
8576         if (data)
8577                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8578         else
8579                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8580         spin_unlock_bh(&tp->lock);
8581
8582         return 0;
8583 }
8584
8585 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8586 {
8587         struct tg3 *tp = netdev_priv(dev);
8588
8589         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8590                 if (data != 0)
8591                         return -EINVAL;
8592                 return 0;
8593         }
8594
8595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8596             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8597             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8599                 ethtool_op_set_tx_ipv6_csum(dev, data);
8600         else
8601                 ethtool_op_set_tx_csum(dev, data);
8602
8603         return 0;
8604 }
8605
8606 static int tg3_get_sset_count(struct net_device *dev, int sset)
8607 {
8608         switch (sset) {
8609         case ETH_SS_TEST:
8610                 return TG3_NUM_TEST;
8611         case ETH_SS_STATS:
8612                 return TG3_NUM_STATS;
8613         default:
8614                 return -EOPNOTSUPP;
8615         }
8616 }
8617
8618 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8619 {
8620         switch (stringset) {
8621         case ETH_SS_STATS:
8622                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8623                 break;
8624         case ETH_SS_TEST:
8625                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8626                 break;
8627         default:
8628                 WARN_ON(1);     /* we need a WARN() */
8629                 break;
8630         }
8631 }
8632
8633 static int tg3_phys_id(struct net_device *dev, u32 data)
8634 {
8635         struct tg3 *tp = netdev_priv(dev);
8636         int i;
8637
8638         if (!netif_running(tp->dev))
8639                 return -EAGAIN;
8640
8641         if (data == 0)
8642                 data = 2;
8643
8644         for (i = 0; i < (data * 2); i++) {
8645                 if ((i % 2) == 0)
8646                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8647                                            LED_CTRL_1000MBPS_ON |
8648                                            LED_CTRL_100MBPS_ON |
8649                                            LED_CTRL_10MBPS_ON |
8650                                            LED_CTRL_TRAFFIC_OVERRIDE |
8651                                            LED_CTRL_TRAFFIC_BLINK |
8652                                            LED_CTRL_TRAFFIC_LED);
8653
8654                 else
8655                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8656                                            LED_CTRL_TRAFFIC_OVERRIDE);
8657
8658                 if (msleep_interruptible(500))
8659                         break;
8660         }
8661         tw32(MAC_LED_CTRL, tp->led_ctrl);
8662         return 0;
8663 }
8664
8665 static void tg3_get_ethtool_stats(struct net_device *dev,
8666                                    struct ethtool_stats *estats, u64 *tmp_stats)
8667 {
8668         struct tg3 *tp = netdev_priv(dev);
8669         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8670 }
8671
8672 #define NVRAM_TEST_SIZE 0x100
8673 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8674 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8675 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8676
8677 static int tg3_test_nvram(struct tg3 *tp)
8678 {
8679         u32 *buf, csum, magic;
8680         int i, j, k, err = 0, size;
8681
8682         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8683                 return -EIO;
8684
8685         if (magic == TG3_EEPROM_MAGIC)
8686                 size = NVRAM_TEST_SIZE;
8687         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8688                 if ((magic & 0xe00000) == 0x200000)
8689                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8690                 else
8691                         return 0;
8692         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8693                 size = NVRAM_SELFBOOT_HW_SIZE;
8694         else
8695                 return -EIO;
8696
8697         buf = kmalloc(size, GFP_KERNEL);
8698         if (buf == NULL)
8699                 return -ENOMEM;
8700
8701         err = -EIO;
8702         for (i = 0, j = 0; i < size; i += 4, j++) {
8703                 u32 val;
8704
8705                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8706                         break;
8707                 buf[j] = cpu_to_le32(val);
8708         }
8709         if (i < size)
8710                 goto out;
8711
8712         /* Selfboot format */
8713         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
8714             TG3_EEPROM_MAGIC_FW) {
8715                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8716
8717                 for (i = 0; i < size; i++)
8718                         csum8 += buf8[i];
8719
8720                 if (csum8 == 0) {
8721                         err = 0;
8722                         goto out;
8723                 }
8724
8725                 err = -EIO;
8726                 goto out;
8727         }
8728
8729         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
8730             TG3_EEPROM_MAGIC_HW) {
8731                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8732                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8733                 u8 *buf8 = (u8 *) buf;
8734
8735                 /* Separate the parity bits and the data bytes.  */
8736                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8737                         if ((i == 0) || (i == 8)) {
8738                                 int l;
8739                                 u8 msk;
8740
8741                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8742                                         parity[k++] = buf8[i] & msk;
8743                                 i++;
8744                         }
8745                         else if (i == 16) {
8746                                 int l;
8747                                 u8 msk;
8748
8749                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8750                                         parity[k++] = buf8[i] & msk;
8751                                 i++;
8752
8753                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8754                                         parity[k++] = buf8[i] & msk;
8755                                 i++;
8756                         }
8757                         data[j++] = buf8[i];
8758                 }
8759
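                /* Each data byte together with its stored parity bit
                 * should have odd parity: an odd number of set data
                 * bits (hweight8() LSB set) must pair with a clear
                 * parity bit, an even number with a set one.
                 */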
8760                 err = -EIO;
8761                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8762                         u8 hw8 = hweight8(data[i]);
8763
8764                         if ((hw8 & 0x1) && parity[i])
8765                                 goto out;
8766                         else if (!(hw8 & 0x1) && !parity[i])
8767                                 goto out;
8768                 }
8769                 err = 0;
8770                 goto out;
8771         }
8772
8773         /* Bootstrap checksum at offset 0x10 */
8774         csum = calc_crc((unsigned char *) buf, 0x10);
8775         if (csum != cpu_to_le32(buf[0x10/4]))
8776                 goto out;
8777
8778         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8779         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8780         if (csum != cpu_to_le32(buf[0xfc/4]))
8781                 goto out;
8782
8783         err = 0;
8784
8785 out:
8786         kfree(buf);
8787         return err;
8788 }
8789
8790 #define TG3_SERDES_TIMEOUT_SEC  2
8791 #define TG3_COPPER_TIMEOUT_SEC  6
8792
8793 static int tg3_test_link(struct tg3 *tp)
8794 {
8795         int i, max;
8796
8797         if (!netif_running(tp->dev))
8798                 return -ENODEV;
8799
8800         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8801                 max = TG3_SERDES_TIMEOUT_SEC;
8802         else
8803                 max = TG3_COPPER_TIMEOUT_SEC;
8804
8805         for (i = 0; i < max; i++) {
8806                 if (netif_carrier_ok(tp->dev))
8807                         return 0;
8808
8809                 if (msleep_interruptible(1000))
8810                         break;
8811         }
8812
8813         return -EIO;
8814 }
8815
8816 /* Only test the commonly used registers */
8817 static int tg3_test_registers(struct tg3 *tp)
8818 {
8819         int i, is_5705, is_5750;
8820         u32 offset, read_mask, write_mask, val, save_val, read_val;
8821         static struct {
8822                 u16 offset;
8823                 u16 flags;
8824 #define TG3_FL_5705     0x1
8825 #define TG3_FL_NOT_5705 0x2
8826 #define TG3_FL_NOT_5788 0x4
8827 #define TG3_FL_NOT_5750 0x8
8828                 u32 read_mask;
8829                 u32 write_mask;
8830         } reg_tbl[] = {
8831                 /* MAC Control Registers */
8832                 { MAC_MODE, TG3_FL_NOT_5705,
8833                         0x00000000, 0x00ef6f8c },
8834                 { MAC_MODE, TG3_FL_5705,
8835                         0x00000000, 0x01ef6b8c },
8836                 { MAC_STATUS, TG3_FL_NOT_5705,
8837                         0x03800107, 0x00000000 },
8838                 { MAC_STATUS, TG3_FL_5705,
8839                         0x03800100, 0x00000000 },
8840                 { MAC_ADDR_0_HIGH, 0x0000,
8841                         0x00000000, 0x0000ffff },
8842                 { MAC_ADDR_0_LOW, 0x0000,
8843                         0x00000000, 0xffffffff },
8844                 { MAC_RX_MTU_SIZE, 0x0000,
8845                         0x00000000, 0x0000ffff },
8846                 { MAC_TX_MODE, 0x0000,
8847                         0x00000000, 0x00000070 },
8848                 { MAC_TX_LENGTHS, 0x0000,
8849                         0x00000000, 0x00003fff },
8850                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8851                         0x00000000, 0x000007fc },
8852                 { MAC_RX_MODE, TG3_FL_5705,
8853                         0x00000000, 0x000007dc },
8854                 { MAC_HASH_REG_0, 0x0000,
8855                         0x00000000, 0xffffffff },
8856                 { MAC_HASH_REG_1, 0x0000,
8857                         0x00000000, 0xffffffff },
8858                 { MAC_HASH_REG_2, 0x0000,
8859                         0x00000000, 0xffffffff },
8860                 { MAC_HASH_REG_3, 0x0000,
8861                         0x00000000, 0xffffffff },
8862
8863                 /* Receive Data and Receive BD Initiator Control Registers. */
8864                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8865                         0x00000000, 0xffffffff },
8866                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8867                         0x00000000, 0xffffffff },
8868                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8869                         0x00000000, 0x00000003 },
8870                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8871                         0x00000000, 0xffffffff },
8872                 { RCVDBDI_STD_BD+0, 0x0000,
8873                         0x00000000, 0xffffffff },
8874                 { RCVDBDI_STD_BD+4, 0x0000,
8875                         0x00000000, 0xffffffff },
8876                 { RCVDBDI_STD_BD+8, 0x0000,
8877                         0x00000000, 0xffff0002 },
8878                 { RCVDBDI_STD_BD+0xc, 0x0000,
8879                         0x00000000, 0xffffffff },
8880
8881                 /* Receive BD Initiator Control Registers. */
8882                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8883                         0x00000000, 0xffffffff },
8884                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8885                         0x00000000, 0x000003ff },
8886                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8887                         0x00000000, 0xffffffff },
8888
8889                 /* Host Coalescing Control Registers. */
8890                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8891                         0x00000000, 0x00000004 },
8892                 { HOSTCC_MODE, TG3_FL_5705,
8893                         0x00000000, 0x000000f6 },
8894                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8895                         0x00000000, 0xffffffff },
8896                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8897                         0x00000000, 0x000003ff },
8898                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8899                         0x00000000, 0xffffffff },
8900                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8901                         0x00000000, 0x000003ff },
8902                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8903                         0x00000000, 0xffffffff },
8904                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8905                         0x00000000, 0x000000ff },
8906                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8907                         0x00000000, 0xffffffff },
8908                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8909                         0x00000000, 0x000000ff },
8910                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8911                         0x00000000, 0xffffffff },
8912                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8913                         0x00000000, 0xffffffff },
8914                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8915                         0x00000000, 0xffffffff },
8916                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8917                         0x00000000, 0x000000ff },
8918                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8919                         0x00000000, 0xffffffff },
8920                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8921                         0x00000000, 0x000000ff },
8922                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8923                         0x00000000, 0xffffffff },
8924                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8925                         0x00000000, 0xffffffff },
8926                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8927                         0x00000000, 0xffffffff },
8928                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8929                         0x00000000, 0xffffffff },
8930                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8931                         0x00000000, 0xffffffff },
8932                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8933                         0xffffffff, 0x00000000 },
8934                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8935                         0xffffffff, 0x00000000 },
8936
8937                 /* Buffer Manager Control Registers. */
8938                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8939                         0x00000000, 0x007fff80 },
8940                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8941                         0x00000000, 0x007fffff },
8942                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8943                         0x00000000, 0x0000003f },
8944                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8945                         0x00000000, 0x000001ff },
8946                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8947                         0x00000000, 0x000001ff },
8948                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8949                         0xffffffff, 0x00000000 },
8950                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8951                         0xffffffff, 0x00000000 },
8952
8953                 /* Mailbox Registers */
8954                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8955                         0x00000000, 0x000001ff },
8956                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8957                         0x00000000, 0x000001ff },
8958                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8959                         0x00000000, 0x000007ff },
8960                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8961                         0x00000000, 0x000001ff },
8962
8963                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8964         };
8965
8966         is_5705 = is_5750 = 0;
8967         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8968                 is_5705 = 1;
8969                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8970                         is_5750 = 1;
8971         }
8972
8973         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8974                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8975                         continue;
8976
8977                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8978                         continue;
8979
8980                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8981                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8982                         continue;
8983
8984                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8985                         continue;
8986
8987                 offset = (u32) reg_tbl[i].offset;
8988                 read_mask = reg_tbl[i].read_mask;
8989                 write_mask = reg_tbl[i].write_mask;
8990
8991                 /* Save the original register content */
8992                 save_val = tr32(offset);
8993
8994                 /* Determine the read-only value. */
8995                 read_val = save_val & read_mask;
8996
8997                 /* Write zero to the register, then make sure the read-only bits
8998                  * are not changed and the read/write bits are all zeros.
8999                  */
9000                 tw32(offset, 0);
9001
9002                 val = tr32(offset);
9003
9004                 /* Test the read-only and read/write bits. */
9005                 if (((val & read_mask) != read_val) || (val & write_mask))
9006                         goto out;
9007
9008                 /* Write ones to all the bits defined by RdMask and WrMask, then
9009                  * make sure the read-only bits are not changed and the
9010                  * read/write bits are all ones.
9011                  */
9012                 tw32(offset, read_mask | write_mask);
9013
9014                 val = tr32(offset);
9015
9016                 /* Test the read-only bits. */
9017                 if ((val & read_mask) != read_val)
9018                         goto out;
9019
9020                 /* Test the read/write bits. */
9021                 if ((val & write_mask) != write_mask)
9022                         goto out;
9023
9024                 tw32(offset, save_val);
9025         }
9026
9027         return 0;
9028
9029 out:
9030         if (netif_msg_hw(tp))
9031                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9032                        offset);
9033         tw32(offset, save_val);
9034         return -EIO;
9035 }
9036
9037 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9038 {
9039         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9040         int i;
9041         u32 j;
9042
9043         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9044                 for (j = 0; j < len; j += 4) {
9045                         u32 val;
9046
9047                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9048                         tg3_read_mem(tp, offset + j, &val);
9049                         if (val != test_pattern[i])
9050                                 return -EIO;
9051                 }
9052         }
9053         return 0;
9054 }
9055
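/*
 * The tables below list the internal SRAM regions (offset, length)
 * that are safe to pattern-test on each chip family, terminated by an
 * offset of 0xffffffff.  The proper table is chosen from the ASIC
 * revision at run time.
 */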
9056 static int tg3_test_memory(struct tg3 *tp)
9057 {
9058         static struct mem_entry {
9059                 u32 offset;
9060                 u32 len;
9061         } mem_tbl_570x[] = {
9062                 { 0x00000000, 0x00b50},
9063                 { 0x00002000, 0x1c000},
9064                 { 0xffffffff, 0x00000}
9065         }, mem_tbl_5705[] = {
9066                 { 0x00000100, 0x0000c},
9067                 { 0x00000200, 0x00008},
9068                 { 0x00004000, 0x00800},
9069                 { 0x00006000, 0x01000},
9070                 { 0x00008000, 0x02000},
9071                 { 0x00010000, 0x0e000},
9072                 { 0xffffffff, 0x00000}
9073         }, mem_tbl_5755[] = {
9074                 { 0x00000200, 0x00008},
9075                 { 0x00004000, 0x00800},
9076                 { 0x00006000, 0x00800},
9077                 { 0x00008000, 0x02000},
9078                 { 0x00010000, 0x0c000},
9079                 { 0xffffffff, 0x00000}
9080         }, mem_tbl_5906[] = {
9081                 { 0x00000200, 0x00008},
9082                 { 0x00004000, 0x00400},
9083                 { 0x00006000, 0x00400},
9084                 { 0x00008000, 0x01000},
9085                 { 0x00010000, 0x01000},
9086                 { 0xffffffff, 0x00000}
9087         };
9088         struct mem_entry *mem_tbl;
9089         int err = 0;
9090         int i;
9091
9092         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9093                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9094                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9095                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9096                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9097                         mem_tbl = mem_tbl_5755;
9098                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9099                         mem_tbl = mem_tbl_5906;
9100                 else
9101                         mem_tbl = mem_tbl_5705;
9102         } else
9103                 mem_tbl = mem_tbl_570x;
9104
9105         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9106                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
9107                 if (err)
9108                         break;
9109         }
9110
9111         return err;
9112 }
9113
9114 #define TG3_MAC_LOOPBACK        0
9115 #define TG3_PHY_LOOPBACK        1
9116
9117 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9118 {
9119         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9120         u32 desc_idx;
9121         struct sk_buff *skb, *rx_skb;
9122         u8 *tx_data;
9123         dma_addr_t map;
9124         int num_pkts, tx_len, rx_len, i, err;
9125         struct tg3_rx_buffer_desc *desc;
9126
9127         if (loopback_mode == TG3_MAC_LOOPBACK) {
9128                 /* HW erratum - MAC loopback fails in some cases on the
9129                  * 5780.  Normal traffic and PHY loopback are not affected
9130                  * by this erratum.
9131                  */
9132                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9133                         return 0;
9134
9135                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9136                            MAC_MODE_PORT_INT_LPBACK;
9137                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9138                         mac_mode |= MAC_MODE_LINK_POLARITY;
9139                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9140                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9141                 else
9142                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9143                 tw32(MAC_MODE, mac_mode);
9144         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9145                 u32 val;
9146
9147                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9148                         u32 phytest;
9149
9150                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9151                                 u32 phy;
9152
9153                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9154                                              phytest | MII_TG3_EPHY_SHADOW_EN);
9155                                 if (!tg3_readphy(tp, 0x1b, &phy))
9156                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
9157                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9158                         }
9159                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9160                 } else
9161                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9162
9163                 tg3_phy_toggle_automdix(tp, 0);
9164
9165                 tg3_writephy(tp, MII_BMCR, val);
9166                 udelay(40);
9167
9168                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9170                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9171                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9172                 } else
9173                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9174
9175                 /* Reset to avoid intermittently losing the first RX packet. */
9176                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9177                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9178                         udelay(10);
9179                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9180                 }
9181                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9182                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9183                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9184                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9185                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9186                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9187                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9188                 }
9189                 tw32(MAC_MODE, mac_mode);
9190         }
9191         else
9192                 return -EINVAL;
9193
9194         err = -EIO;
9195
9196         tx_len = 1514;
9197         skb = netdev_alloc_skb(tp->dev, tx_len);
9198         if (!skb)
9199                 return -ENOMEM;
9200
9201         tx_data = skb_put(skb, tx_len);
9202         memcpy(tx_data, tp->dev->dev_addr, 6);
9203         memset(tx_data + 6, 0x0, 8);
9204
9205         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9206
9207         for (i = 14; i < tx_len; i++)
9208                 tx_data[i] = (u8) (i & 0xff);
9209
9210         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9211
9212         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9213              HOSTCC_MODE_NOW);
9214
9215         udelay(10);
9216
9217         rx_start_idx = tp->hw_status->idx[0].rx_producer;
9218
9219         num_pkts = 0;
9220
9221         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9222
9223         tp->tx_prod++;
9224         num_pkts++;
9225
9226         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9227                      tp->tx_prod);
9228         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9229
9230         udelay(10);
9231
9232         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
9233         for (i = 0; i < 25; i++) {
9234                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9235                        HOSTCC_MODE_NOW);
9236
9237                 udelay(10);
9238
9239                 tx_idx = tp->hw_status->idx[0].tx_consumer;
9240                 rx_idx = tp->hw_status->idx[0].rx_producer;
9241                 if ((tx_idx == tp->tx_prod) &&
9242                     (rx_idx == (rx_start_idx + num_pkts)))
9243                         break;
9244         }
9245
9246         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9247         dev_kfree_skb(skb);
9248
9249         if (tx_idx != tp->tx_prod)
9250                 goto out;
9251
9252         if (rx_idx != rx_start_idx + num_pkts)
9253                 goto out;
9254
9255         desc = &tp->rx_rcb[rx_start_idx];
9256         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9257         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9258         if (opaque_key != RXD_OPAQUE_RING_STD)
9259                 goto out;
9260
9261         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9262             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9263                 goto out;
9264
9265         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9266         if (rx_len != tx_len)
9267                 goto out;
9268
9269         rx_skb = tp->rx_std_buffers[desc_idx].skb;
9270
9271         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9272         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9273
9274         for (i = 14; i < tx_len; i++) {
9275                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9276                         goto out;
9277         }
9278         err = 0;
9279
9280         /* tg3_free_rings will unmap and free the rx_skb */
9281 out:
9282         return err;
9283 }
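/* Flow summary (informal, derived from the code above): the loopback test
 * 1) programs MAC or PHY internal loopback, 2) builds one 1514-byte frame
 * whose payload from byte 14 onward is the pattern (i & 0xff), 3) posts it
 * through the MAILBOX_SNDHOST_PROD_IDX_0 TX mailbox, 4) polls the status
 * block until the TX consumer and RX producer indices advance, and
 * 5) verifies the received payload byte-for-byte.  A failure at any step
 * leaves err at -EIO; tg3_free_rings() later unmaps and frees the receive
 * skb, so only the transmit skb is freed here.
 */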
9284
9285 #define TG3_MAC_LOOPBACK_FAILED         1
9286 #define TG3_PHY_LOOPBACK_FAILED         2
9287 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9288                                          TG3_PHY_LOOPBACK_FAILED)
9289
9290 static int tg3_test_loopback(struct tg3 *tp)
9291 {
9292         int err = 0;
9293         u32 cpmuctrl = 0;
9294
9295         if (!netif_running(tp->dev))
9296                 return TG3_LOOPBACK_FAILED;
9297
9298         err = tg3_reset_hw(tp, 1);
9299         if (err)
9300                 return TG3_LOOPBACK_FAILED;
9301
9302         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9303                 int i;
9304                 u32 status;
9305
9306                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9307
9308                 /* Wait for up to 40 microseconds to acquire lock. */
9309                 for (i = 0; i < 4; i++) {
9310                         status = tr32(TG3_CPMU_MUTEX_GNT);
9311                         if (status == CPMU_MUTEX_GNT_DRIVER)
9312                                 break;
9313                         udelay(10);
9314                 }
9315
9316                 if (status != CPMU_MUTEX_GNT_DRIVER)
9317                         return TG3_LOOPBACK_FAILED;
9318
9319                 cpmuctrl = tr32(TG3_CPMU_CTRL);
9320
9321                 /* Turn off power management based on link speed. */
9322                 tw32(TG3_CPMU_CTRL,
9323                      cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
9324         }
9325
9326         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9327                 err |= TG3_MAC_LOOPBACK_FAILED;
9328
9329         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9330                 tw32(TG3_CPMU_CTRL, cpmuctrl);
9331
9332                 /* Release the mutex */
9333                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9334         }
9335
9336         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9337                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9338                         err |= TG3_PHY_LOOPBACK_FAILED;
9339         }
9340
9341         return err;
9342 }
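/* Note on the CPMU handshake above (chips that set TG3_FLAG_CPMU_PRESENT):
 * the driver requests the hardware mutex by writing CPMU_MUTEX_REQ_DRIVER
 * to TG3_CPMU_MUTEX_REQ, polls TG3_CPMU_MUTEX_GNT for up to 40 usec,
 * temporarily clears CPMU_CTRL_LINK_SPEED_MODE so link-speed based power
 * management cannot interfere with the MAC loopback, then restores
 * TG3_CPMU_CTRL and writes CPMU_MUTEX_GNT_DRIVER back to
 * TG3_CPMU_MUTEX_GNT to release the mutex.
 */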
9343
9344 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9345                           u64 *data)
9346 {
9347         struct tg3 *tp = netdev_priv(dev);
9348
9349         if (tp->link_config.phy_is_low_power)
9350                 tg3_set_power_state(tp, PCI_D0);
9351
9352         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9353
9354         if (tg3_test_nvram(tp) != 0) {
9355                 etest->flags |= ETH_TEST_FL_FAILED;
9356                 data[0] = 1;
9357         }
9358         if (tg3_test_link(tp) != 0) {
9359                 etest->flags |= ETH_TEST_FL_FAILED;
9360                 data[1] = 1;
9361         }
9362         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9363                 int err, irq_sync = 0;
9364
9365                 if (netif_running(dev)) {
9366                         tg3_netif_stop(tp);
9367                         irq_sync = 1;
9368                 }
9369
9370                 tg3_full_lock(tp, irq_sync);
9371
9372                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9373                 err = tg3_nvram_lock(tp);
9374                 tg3_halt_cpu(tp, RX_CPU_BASE);
9375                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9376                         tg3_halt_cpu(tp, TX_CPU_BASE);
9377                 if (!err)
9378                         tg3_nvram_unlock(tp);
9379
9380                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9381                         tg3_phy_reset(tp);
9382
9383                 if (tg3_test_registers(tp) != 0) {
9384                         etest->flags |= ETH_TEST_FL_FAILED;
9385                         data[2] = 1;
9386                 }
9387                 if (tg3_test_memory(tp) != 0) {
9388                         etest->flags |= ETH_TEST_FL_FAILED;
9389                         data[3] = 1;
9390                 }
9391                 if ((data[4] = tg3_test_loopback(tp)) != 0)
9392                         etest->flags |= ETH_TEST_FL_FAILED;
9393
9394                 tg3_full_unlock(tp);
9395
9396                 if (tg3_test_interrupt(tp) != 0) {
9397                         etest->flags |= ETH_TEST_FL_FAILED;
9398                         data[5] = 1;
9399                 }
9400
9401                 tg3_full_lock(tp, 0);
9402
9403                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9404                 if (netif_running(dev)) {
9405                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9406                         if (!tg3_restart_hw(tp, 1))
9407                                 tg3_netif_start(tp);
9408                 }
9409
9410                 tg3_full_unlock(tp);
9411         }
9412         if (tp->link_config.phy_is_low_power)
9413                 tg3_set_power_state(tp, PCI_D3hot);
9414
9415 }
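/* Usage note (sketch): these tests are reached through the standard
 * ethtool self-test interface, e.g. "ethtool -t eth0 offline" (the
 * interface name is only an example).  The result slots filled in above
 * map as: data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4] loopback (MAC/PHY failure bits), data[5] interrupt.  Only the
 * offline variant runs the destructive register/memory/loopback tests.
 */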
9416
9417 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9418 {
9419         struct mii_ioctl_data *data = if_mii(ifr);
9420         struct tg3 *tp = netdev_priv(dev);
9421         int err;
9422
9423         switch (cmd) {
9424         case SIOCGMIIPHY:
9425                 data->phy_id = PHY_ADDR;
9426
9427                 /* fallthru */
9428         case SIOCGMIIREG: {
9429                 u32 mii_regval;
9430
9431                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9432                         break;                  /* We have no PHY */
9433
9434                 if (tp->link_config.phy_is_low_power)
9435                         return -EAGAIN;
9436
9437                 spin_lock_bh(&tp->lock);
9438                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9439                 spin_unlock_bh(&tp->lock);
9440
9441                 data->val_out = mii_regval;
9442
9443                 return err;
9444         }
9445
9446         case SIOCSMIIREG:
9447                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9448                         break;                  /* We have no PHY */
9449
9450                 if (!capable(CAP_NET_ADMIN))
9451                         return -EPERM;
9452
9453                 if (tp->link_config.phy_is_low_power)
9454                         return -EAGAIN;
9455
9456                 spin_lock_bh(&tp->lock);
9457                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9458                 spin_unlock_bh(&tp->lock);
9459
9460                 return err;
9461
9462         default:
9463                 /* do nothing */
9464                 break;
9465         }
9466         return -EOPNOTSUPP;
9467 }
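/* Illustrative user-space sketch (not part of the driver): the
 * SIOCGMIIPHY/SIOCGMIIREG handling above follows the usual MII ioctl
 * conventions (<sys/ioctl.h>, <net/if.h>, <linux/mii.h>,
 * <linux/sockios.h>), so reading PHY register 1 (MII_BMSR) looks roughly
 * like this, with error handling omitted and "eth0" just an example name:
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);           // fills mii->phy_id
 *      mii->reg_num = 1;                       // MII_BMSR
 *      ioctl(fd, SIOCGMIIREG, &ifr);           // result in mii->val_out
 */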
9468
9469 #if TG3_VLAN_TAG_USED
9470 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9471 {
9472         struct tg3 *tp = netdev_priv(dev);
9473
9474         if (netif_running(dev))
9475                 tg3_netif_stop(tp);
9476
9477         tg3_full_lock(tp, 0);
9478
9479         tp->vlgrp = grp;
9480
9481         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9482         __tg3_set_rx_mode(dev);
9483
9484         if (netif_running(dev))
9485                 tg3_netif_start(tp);
9486
9487         tg3_full_unlock(tp);
9488 }
9489 #endif
9490
9491 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9492 {
9493         struct tg3 *tp = netdev_priv(dev);
9494
9495         memcpy(ec, &tp->coal, sizeof(*ec));
9496         return 0;
9497 }
9498
9499 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9500 {
9501         struct tg3 *tp = netdev_priv(dev);
9502         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9503         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9504
9505         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9506                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9507                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9508                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9509                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9510         }
9511
9512         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9513             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9514             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9515             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9516             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9517             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9518             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9519             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9520             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9521             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9522                 return -EINVAL;
9523
9524         /* No rx interrupts will be generated if both are zero */
9525         if ((ec->rx_coalesce_usecs == 0) &&
9526             (ec->rx_max_coalesced_frames == 0))
9527                 return -EINVAL;
9528
9529         /* No tx interrupts will be generated if both are zero */
9530         if ((ec->tx_coalesce_usecs == 0) &&
9531             (ec->tx_max_coalesced_frames == 0))
9532                 return -EINVAL;
9533
9534         /* Only copy relevant parameters, ignore all others. */
9535         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9536         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9537         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9538         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9539         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9540         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9541         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9542         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9543         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9544
9545         if (netif_running(dev)) {
9546                 tg3_full_lock(tp, 0);
9547                 __tg3_set_coalesce(tp, &tp->coal);
9548                 tg3_full_unlock(tp);
9549         }
9550         return 0;
9551 }
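/* Usage note (sketch): the coalescing limits validated above are exercised
 * through the standard ethtool -C interface, e.g.
 *
 *      ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * (interface name and values are only examples).  Note the two -EINVAL
 * checks above: rx-usecs/rx-frames and tx-usecs/tx-frames may not both be
 * zero at the same time, since that would disable RX or TX interrupts.
 */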
9552
9553 static const struct ethtool_ops tg3_ethtool_ops = {
9554         .get_settings           = tg3_get_settings,
9555         .set_settings           = tg3_set_settings,
9556         .get_drvinfo            = tg3_get_drvinfo,
9557         .get_regs_len           = tg3_get_regs_len,
9558         .get_regs               = tg3_get_regs,
9559         .get_wol                = tg3_get_wol,
9560         .set_wol                = tg3_set_wol,
9561         .get_msglevel           = tg3_get_msglevel,
9562         .set_msglevel           = tg3_set_msglevel,
9563         .nway_reset             = tg3_nway_reset,
9564         .get_link               = ethtool_op_get_link,
9565         .get_eeprom_len         = tg3_get_eeprom_len,
9566         .get_eeprom             = tg3_get_eeprom,
9567         .set_eeprom             = tg3_set_eeprom,
9568         .get_ringparam          = tg3_get_ringparam,
9569         .set_ringparam          = tg3_set_ringparam,
9570         .get_pauseparam         = tg3_get_pauseparam,
9571         .set_pauseparam         = tg3_set_pauseparam,
9572         .get_rx_csum            = tg3_get_rx_csum,
9573         .set_rx_csum            = tg3_set_rx_csum,
9574         .set_tx_csum            = tg3_set_tx_csum,
9575         .set_sg                 = ethtool_op_set_sg,
9576         .set_tso                = tg3_set_tso,
9577         .self_test              = tg3_self_test,
9578         .get_strings            = tg3_get_strings,
9579         .phys_id                = tg3_phys_id,
9580         .get_ethtool_stats      = tg3_get_ethtool_stats,
9581         .get_coalesce           = tg3_get_coalesce,
9582         .set_coalesce           = tg3_set_coalesce,
9583         .get_sset_count         = tg3_get_sset_count,
9584 };
9585
9586 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9587 {
9588         u32 cursize, val, magic;
9589
9590         tp->nvram_size = EEPROM_CHIP_SIZE;
9591
9592         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9593                 return;
9594
9595         if ((magic != TG3_EEPROM_MAGIC) &&
9596             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9597             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9598                 return;
9599
9600         /*
9601          * Size the chip by reading offsets at increasing powers of two.
9602          * When we encounter our validation signature, we know the addressing
9603          * has wrapped around, and thus have our chip size.
9604          */
9605         cursize = 0x10;
9606
9607         while (cursize < tp->nvram_size) {
9608                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9609                         return;
9610
9611                 if (val == magic)
9612                         break;
9613
9614                 cursize <<= 1;
9615         }
9616
9617         tp->nvram_size = cursize;
9618 }
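/* Worked example (informal): the loop above probes offsets 0x10, 0x20,
 * 0x40, ... doubling each time.  On a hypothetical 4 KB part, the read at
 * offset 0x1000 wraps back to offset 0 and returns the magic signature
 * again, so cursize (0x1000) becomes tp->nvram_size.  If the signature is
 * never seen again, the size saturates at the EEPROM_CHIP_SIZE default
 * set at the top of the function.
 */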
9619
9620 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9621 {
9622         u32 val;
9623
9624         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9625                 return;
9626
9627         /* Selfboot format */
9628         if (val != TG3_EEPROM_MAGIC) {
9629                 tg3_get_eeprom_size(tp);
9630                 return;
9631         }
9632
9633         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9634                 if (val != 0) {
9635                         tp->nvram_size = (val >> 16) * 1024;
9636                         return;
9637                 }
9638         }
9639         tp->nvram_size = 0x80000;
9640 }
9641
9642 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9643 {
9644         u32 nvcfg1;
9645
9646         nvcfg1 = tr32(NVRAM_CFG1);
9647         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9648                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9649         } else {
9651                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9652                 tw32(NVRAM_CFG1, nvcfg1);
9653         }
9654
9655         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9656             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9657                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9658                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9659                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9660                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9661                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9662                                 break;
9663                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9664                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9665                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9666                                 break;
9667                         case FLASH_VENDOR_ATMEL_EEPROM:
9668                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9669                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9670                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9671                                 break;
9672                         case FLASH_VENDOR_ST:
9673                                 tp->nvram_jedecnum = JEDEC_ST;
9674                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9675                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9676                                 break;
9677                         case FLASH_VENDOR_SAIFUN:
9678                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9679                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9680                                 break;
9681                         case FLASH_VENDOR_SST_SMALL:
9682                         case FLASH_VENDOR_SST_LARGE:
9683                                 tp->nvram_jedecnum = JEDEC_SST;
9684                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9685                                 break;
9686                 }
9687         } else {
9689                 tp->nvram_jedecnum = JEDEC_ATMEL;
9690                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9691                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9692         }
9693 }
9694
9695 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9696 {
9697         u32 nvcfg1;
9698
9699         nvcfg1 = tr32(NVRAM_CFG1);
9700
9701         /* NVRAM protection for TPM */
9702         if (nvcfg1 & (1 << 27))
9703                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9704
9705         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9706                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9707                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9708                         tp->nvram_jedecnum = JEDEC_ATMEL;
9709                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9710                         break;
9711                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9712                         tp->nvram_jedecnum = JEDEC_ATMEL;
9713                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9714                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9715                         break;
9716                 case FLASH_5752VENDOR_ST_M45PE10:
9717                 case FLASH_5752VENDOR_ST_M45PE20:
9718                 case FLASH_5752VENDOR_ST_M45PE40:
9719                         tp->nvram_jedecnum = JEDEC_ST;
9720                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9721                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9722                         break;
9723         }
9724
9725         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9726                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9727                         case FLASH_5752PAGE_SIZE_256:
9728                                 tp->nvram_pagesize = 256;
9729                                 break;
9730                         case FLASH_5752PAGE_SIZE_512:
9731                                 tp->nvram_pagesize = 512;
9732                                 break;
9733                         case FLASH_5752PAGE_SIZE_1K:
9734                                 tp->nvram_pagesize = 1024;
9735                                 break;
9736                         case FLASH_5752PAGE_SIZE_2K:
9737                                 tp->nvram_pagesize = 2048;
9738                                 break;
9739                         case FLASH_5752PAGE_SIZE_4K:
9740                                 tp->nvram_pagesize = 4096;
9741                                 break;
9742                         case FLASH_5752PAGE_SIZE_264:
9743                                 tp->nvram_pagesize = 264;
9744                                 break;
9745                 }
9746         } else {
9748                 /* For eeprom, set pagesize to maximum eeprom size */
9749                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9750
9751                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9752                 tw32(NVRAM_CFG1, nvcfg1);
9753         }
9754 }
9755
9756 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9757 {
9758         u32 nvcfg1, protect = 0;
9759
9760         nvcfg1 = tr32(NVRAM_CFG1);
9761
9762         /* NVRAM protection for TPM */
9763         if (nvcfg1 & (1 << 27)) {
9764                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9765                 protect = 1;
9766         }
9767
9768         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9769         switch (nvcfg1) {
9770                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9771                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9772                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9773                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9774                         tp->nvram_jedecnum = JEDEC_ATMEL;
9775                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9776                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9777                         tp->nvram_pagesize = 264;
9778                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9779                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9780                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9781                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9782                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9783                         else
9784                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9785                         break;
9786                 case FLASH_5752VENDOR_ST_M45PE10:
9787                 case FLASH_5752VENDOR_ST_M45PE20:
9788                 case FLASH_5752VENDOR_ST_M45PE40:
9789                         tp->nvram_jedecnum = JEDEC_ST;
9790                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9791                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9792                         tp->nvram_pagesize = 256;
9793                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9794                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9795                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9796                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9797                         else
9798                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9799                         break;
9800         }
9801 }
9802
9803 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9804 {
9805         u32 nvcfg1;
9806
9807         nvcfg1 = tr32(NVRAM_CFG1);
9808
9809         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9810                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9811                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9812                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9813                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9814                         tp->nvram_jedecnum = JEDEC_ATMEL;
9815                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9816                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9817
9818                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9819                         tw32(NVRAM_CFG1, nvcfg1);
9820                         break;
9821                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9822                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9823                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9824                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9825                         tp->nvram_jedecnum = JEDEC_ATMEL;
9826                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9827                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9828                         tp->nvram_pagesize = 264;
9829                         break;
9830                 case FLASH_5752VENDOR_ST_M45PE10:
9831                 case FLASH_5752VENDOR_ST_M45PE20:
9832                 case FLASH_5752VENDOR_ST_M45PE40:
9833                         tp->nvram_jedecnum = JEDEC_ST;
9834                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9835                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9836                         tp->nvram_pagesize = 256;
9837                         break;
9838         }
9839 }
9840
9841 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9842 {
9843         u32 nvcfg1, protect = 0;
9844
9845         nvcfg1 = tr32(NVRAM_CFG1);
9846
9847         /* NVRAM protection for TPM */
9848         if (nvcfg1 & (1 << 27)) {
9849                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9850                 protect = 1;
9851         }
9852
9853         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9854         switch (nvcfg1) {
9855                 case FLASH_5761VENDOR_ATMEL_ADB021D:
9856                 case FLASH_5761VENDOR_ATMEL_ADB041D:
9857                 case FLASH_5761VENDOR_ATMEL_ADB081D:
9858                 case FLASH_5761VENDOR_ATMEL_ADB161D:
9859                 case FLASH_5761VENDOR_ATMEL_MDB021D:
9860                 case FLASH_5761VENDOR_ATMEL_MDB041D:
9861                 case FLASH_5761VENDOR_ATMEL_MDB081D:
9862                 case FLASH_5761VENDOR_ATMEL_MDB161D:
9863                         tp->nvram_jedecnum = JEDEC_ATMEL;
9864                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9865                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9866                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9867                         tp->nvram_pagesize = 256;
9868                         break;
9869                 case FLASH_5761VENDOR_ST_A_M45PE20:
9870                 case FLASH_5761VENDOR_ST_A_M45PE40:
9871                 case FLASH_5761VENDOR_ST_A_M45PE80:
9872                 case FLASH_5761VENDOR_ST_A_M45PE16:
9873                 case FLASH_5761VENDOR_ST_M_M45PE20:
9874                 case FLASH_5761VENDOR_ST_M_M45PE40:
9875                 case FLASH_5761VENDOR_ST_M_M45PE80:
9876                 case FLASH_5761VENDOR_ST_M_M45PE16:
9877                         tp->nvram_jedecnum = JEDEC_ST;
9878                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9879                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9880                         tp->nvram_pagesize = 256;
9881                         break;
9882         }
9883
9884         if (protect) {
9885                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9886         } else {
9887                 switch (nvcfg1) {
9888                         case FLASH_5761VENDOR_ATMEL_ADB161D:
9889                         case FLASH_5761VENDOR_ATMEL_MDB161D:
9890                         case FLASH_5761VENDOR_ST_A_M45PE16:
9891                         case FLASH_5761VENDOR_ST_M_M45PE16:
9892                                 tp->nvram_size = 0x100000;
9893                                 break;
9894                         case FLASH_5761VENDOR_ATMEL_ADB081D:
9895                         case FLASH_5761VENDOR_ATMEL_MDB081D:
9896                         case FLASH_5761VENDOR_ST_A_M45PE80:
9897                         case FLASH_5761VENDOR_ST_M_M45PE80:
9898                                 tp->nvram_size = 0x80000;
9899                                 break;
9900                         case FLASH_5761VENDOR_ATMEL_ADB041D:
9901                         case FLASH_5761VENDOR_ATMEL_MDB041D:
9902                         case FLASH_5761VENDOR_ST_A_M45PE40:
9903                         case FLASH_5761VENDOR_ST_M_M45PE40:
9904                                 tp->nvram_size = 0x40000;
9905                                 break;
9906                         case FLASH_5761VENDOR_ATMEL_ADB021D:
9907                         case FLASH_5761VENDOR_ATMEL_MDB021D:
9908                         case FLASH_5761VENDOR_ST_A_M45PE20:
9909                         case FLASH_5761VENDOR_ST_M_M45PE20:
9910                                 tp->nvram_size = 0x20000;
9911                                 break;
9912                 }
9913         }
9914 }
9915
9916 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9917 {
9918         tp->nvram_jedecnum = JEDEC_ATMEL;
9919         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9920         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9921 }
9922
9923 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9924 static void __devinit tg3_nvram_init(struct tg3 *tp)
9925 {
9926         tw32_f(GRC_EEPROM_ADDR,
9927              (EEPROM_ADDR_FSM_RESET |
9928               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9929                EEPROM_ADDR_CLKPERD_SHIFT)));
9930
9931         msleep(1);
9932
9933         /* Enable seeprom accesses. */
9934         tw32_f(GRC_LOCAL_CTRL,
9935              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9936         udelay(100);
9937
9938         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9939             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9940                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9941
9942                 if (tg3_nvram_lock(tp)) {
9943                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9944                                "tg3_nvram_init failed.\n", tp->dev->name);
9945                         return;
9946                 }
9947                 tg3_enable_nvram_access(tp);
9948
9949                 tp->nvram_size = 0;
9950
9951                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9952                         tg3_get_5752_nvram_info(tp);
9953                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9954                         tg3_get_5755_nvram_info(tp);
9955                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9956                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9957                         tg3_get_5787_nvram_info(tp);
9958                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9959                         tg3_get_5761_nvram_info(tp);
9960                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9961                         tg3_get_5906_nvram_info(tp);
9962                 else
9963                         tg3_get_nvram_info(tp);
9964
9965                 if (tp->nvram_size == 0)
9966                         tg3_get_nvram_size(tp);
9967
9968                 tg3_disable_nvram_access(tp);
9969                 tg3_nvram_unlock(tp);
9970
9971         } else {
9972                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9973
9974                 tg3_get_eeprom_size(tp);
9975         }
9976 }
9977
9978 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9979                                         u32 offset, u32 *val)
9980 {
9981         u32 tmp;
9982         int i;
9983
9984         if (offset > EEPROM_ADDR_ADDR_MASK ||
9985             (offset % 4) != 0)
9986                 return -EINVAL;
9987
9988         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9989                                         EEPROM_ADDR_DEVID_MASK |
9990                                         EEPROM_ADDR_READ);
9991         tw32(GRC_EEPROM_ADDR,
9992              tmp |
9993              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9994              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9995               EEPROM_ADDR_ADDR_MASK) |
9996              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9997
9998         for (i = 0; i < 1000; i++) {
9999                 tmp = tr32(GRC_EEPROM_ADDR);
10000
10001                 if (tmp & EEPROM_ADDR_COMPLETE)
10002                         break;
10003                 msleep(1);
10004         }
10005         if (!(tmp & EEPROM_ADDR_COMPLETE))
10006                 return -EBUSY;
10007
10008         *val = tr32(GRC_EEPROM_DATA);
10009         return 0;
10010 }
10011
10012 #define NVRAM_CMD_TIMEOUT 10000
10013
10014 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10015 {
10016         int i;
10017
10018         tw32(NVRAM_CMD, nvram_cmd);
10019         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10020                 udelay(10);
10021                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10022                         udelay(10);
10023                         break;
10024                 }
10025         }
10026         if (i == NVRAM_CMD_TIMEOUT) {
10027                 return -EBUSY;
10028         }
10029         return 0;
10030 }
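/* Timing note (derived from the constants above): the command is polled
 * every 10 usec for up to NVRAM_CMD_TIMEOUT (10000) iterations, so a stuck
 * NVRAM_CMD_DONE bit is given roughly 100 ms before -EBUSY is returned.
 * Callers pass the command as a mask, e.g. a read is issued as
 *
 *      NVRAM_CMD_RD | NVRAM_CMD_GO | NVRAM_CMD_FIRST |
 *      NVRAM_CMD_LAST | NVRAM_CMD_DONE
 *
 * as seen in tg3_nvram_read() below.
 */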
10031
10032 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10033 {
10034         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10035             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10036             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10037            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10038             (tp->nvram_jedecnum == JEDEC_ATMEL))
10039
10040                 addr = ((addr / tp->nvram_pagesize) <<
10041                         ATMEL_AT45DB0X1B_PAGE_POS) +
10042                        (addr % tp->nvram_pagesize);
10043
10044         return addr;
10045 }
10046
10047 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10048 {
10049         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10050             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10051             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10052            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10053             (tp->nvram_jedecnum == JEDEC_ATMEL))
10054
10055                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10056                         tp->nvram_pagesize) +
10057                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10058
10059         return addr;
10060 }
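/* Worked example (assuming ATMEL_AT45DB0X1B_PAGE_POS is 9, i.e. a 264-byte
 * page addressed as page << 9 | byte): for a buffered Atmel flash with
 * nvram_pagesize == 264, logical offset 1000 is page 3, byte 208, so
 * tg3_nvram_phys_addr() returns (3 << 9) + 208 = 0x6d0, and
 * tg3_nvram_logical_addr() maps 0x6d0 back to 3 * 264 + 208 = 1000.
 * Other flash types (and parts with TG3_FLG3_NO_NVRAM_ADDR_TRANS set) use
 * the offset unchanged.
 */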
10061
10062 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10063 {
10064         int ret;
10065
10066         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10067                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10068
10069         offset = tg3_nvram_phys_addr(tp, offset);
10070
10071         if (offset > NVRAM_ADDR_MSK)
10072                 return -EINVAL;
10073
10074         ret = tg3_nvram_lock(tp);
10075         if (ret)
10076                 return ret;
10077
10078         tg3_enable_nvram_access(tp);
10079
10080         tw32(NVRAM_ADDR, offset);
10081         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10082                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10083
10084         if (ret == 0)
10085                 *val = swab32(tr32(NVRAM_RDDATA));
10086
10087         tg3_disable_nvram_access(tp);
10088
10089         tg3_nvram_unlock(tp);
10090
10091         return ret;
10092 }
10093
10094 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10095 {
10096         int err;
10097         u32 tmp;
10098
10099         err = tg3_nvram_read(tp, offset, &tmp);
10100         *val = err ? 0 : swab32(tmp);   /* tmp is uninitialized on error */
10101         return err;
10102 }
10103
10104 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10105                                     u32 offset, u32 len, u8 *buf)
10106 {
10107         int i, j, rc = 0;
10108         u32 val;
10109
10110         for (i = 0; i < len; i += 4) {
10111                 u32 addr, data;
10112
10113                 addr = offset + i;
10114
10115                 memcpy(&data, buf + i, 4);
10116
10117                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10118
10119                 val = tr32(GRC_EEPROM_ADDR);
10120                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10121
10122                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10123                         EEPROM_ADDR_READ);
10124                 tw32(GRC_EEPROM_ADDR, val |
10125                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10126                         (addr & EEPROM_ADDR_ADDR_MASK) |
10127                         EEPROM_ADDR_START |
10128                         EEPROM_ADDR_WRITE);
10129
10130                 for (j = 0; j < 1000; j++) {
10131                         val = tr32(GRC_EEPROM_ADDR);
10132
10133                         if (val & EEPROM_ADDR_COMPLETE)
10134                                 break;
10135                         msleep(1);
10136                 }
10137                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10138                         rc = -EBUSY;
10139                         break;
10140                 }
10141         }
10142
10143         return rc;
10144 }
10145
10146 /* offset and length are dword aligned */
10147 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10148                 u8 *buf)
10149 {
10150         int ret = 0;
10151         u32 pagesize = tp->nvram_pagesize;
10152         u32 pagemask = pagesize - 1;
10153         u32 nvram_cmd;
10154         u8 *tmp;
10155
10156         tmp = kmalloc(pagesize, GFP_KERNEL);
10157         if (tmp == NULL)
10158                 return -ENOMEM;
10159
10160         while (len) {
10161                 int j;
10162                 u32 phy_addr, page_off, size;
10163
10164                 phy_addr = offset & ~pagemask;
10165
10166                 for (j = 0; j < pagesize; j += 4) {
10167                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
10168                                                 (u32 *) (tmp + j))))
10169                                 break;
10170                 }
10171                 if (ret)
10172                         break;
10173
10174                 page_off = offset & pagemask;
10175                 size = pagesize;
10176                 if (len < size)
10177                         size = len;
10178
10179                 len -= size;
10180
10181                 memcpy(tmp + page_off, buf, size);
10182
10183                 offset = offset + (pagesize - page_off);
10184
10185                 tg3_enable_nvram_access(tp);
10186
10187                 /*
10188                  * Before we can erase the flash page, we need
10189                  * to issue a special "write enable" command.
10190                  */
10191                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10192
10193                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10194                         break;
10195
10196                 /* Erase the target page */
10197                 tw32(NVRAM_ADDR, phy_addr);
10198
10199                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10200                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10201
10202                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10203                         break;
10204
10205                 /* Issue another write enable to start the write. */
10206                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10207
10208                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10209                         break;
10210
10211                 for (j = 0; j < pagesize; j += 4) {
10212                         u32 data;
10213
10214                         data = *((u32 *) (tmp + j));
10215                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
10216
10217                         tw32(NVRAM_ADDR, phy_addr + j);
10218
10219                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10220                                 NVRAM_CMD_WR;
10221
10222                         if (j == 0)
10223                                 nvram_cmd |= NVRAM_CMD_FIRST;
10224                         else if (j == (pagesize - 4))
10225                                 nvram_cmd |= NVRAM_CMD_LAST;
10226
10227                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10228                                 break;
10229                 }
10230                 if (ret)
10231                         break;
10232         }
10233
10234         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10235         tg3_nvram_exec_cmd(tp, nvram_cmd);
10236
10237         kfree(tmp);
10238
10239         return ret;
10240 }
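/* Flow summary (informal): for unbuffered flash each pass of the loop above
 * is a full read-modify-write of one nvram_pagesize page: read the page
 * into tmp[], merge the caller's data at page_off, issue a write enable
 * (WREN), erase the page, issue WREN again, stream the page back out word
 * by word with FIRST/LAST markers, and finally send WRDI once all pages
 * are done.
 */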
10241
10242 /* offset and length are dword aligned */
10243 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10244                 u8 *buf)
10245 {
10246         int i, ret = 0;
10247
10248         for (i = 0; i < len; i += 4, offset += 4) {
10249                 u32 data, page_off, phy_addr, nvram_cmd;
10250
10251                 memcpy(&data, buf + i, 4);
10252                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10253
10254                 page_off = offset % tp->nvram_pagesize;
10255
10256                 phy_addr = tg3_nvram_phys_addr(tp, offset);
10257
10258                 tw32(NVRAM_ADDR, phy_addr);
10259
10260                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10261
10262                 if ((page_off == 0) || (i == 0))
10263                         nvram_cmd |= NVRAM_CMD_FIRST;
10264                 if (page_off == (tp->nvram_pagesize - 4))
10265                         nvram_cmd |= NVRAM_CMD_LAST;
10266
10267                 if (i == (len - 4))
10268                         nvram_cmd |= NVRAM_CMD_LAST;
10269
10270                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10271                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10272                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10273                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10274                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10275                     (tp->nvram_jedecnum == JEDEC_ST) &&
10276                     (nvram_cmd & NVRAM_CMD_FIRST)) {
10277
10278                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
10279                                                  NVRAM_CMD_GO |
10280                                                  NVRAM_CMD_DONE);
10281                         if (ret)
10282                                 break;
10283                 }
10284                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10285                         /* We always do complete word writes to eeprom. */
10286                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10287                 }
10288
10289                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10290                         break;
10291         }
10292         return ret;
10293 }
10294
10295 /* offset and length are dword aligned */
10296 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10297 {
10298         int ret;
10299
10300         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10301                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10302                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10303                 udelay(40);
10304         }
10305
10306         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10307                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10308         } else {
10310                 u32 grc_mode;
10311
10312                 ret = tg3_nvram_lock(tp);
10313                 if (ret)
10314                         return ret;
10315
10316                 tg3_enable_nvram_access(tp);
10317                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10318                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10319                         tw32(NVRAM_WRITE1, 0x406);
10320
10321                 grc_mode = tr32(GRC_MODE);
10322                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10323
10324                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10325                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10326
10327                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10328                                 buf);
10329                 } else {
10331                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10332                                 buf);
10333                 }
10334
10335                 grc_mode = tr32(GRC_MODE);
10336                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10337
10338                 tg3_disable_nvram_access(tp);
10339                 tg3_nvram_unlock(tp);
10340         }
10341
10342         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10343                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10344                 udelay(40);
10345         }
10346
10347         return ret;
10348 }
10349
10350 struct subsys_tbl_ent {
10351         u16 subsys_vendor, subsys_devid;
10352         u32 phy_id;
10353 };
10354
10355 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10356         /* Broadcom boards. */
10357         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10358         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10359         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10360         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
10361         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10362         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10363         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
10364         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10365         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10366         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10367         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10368
10369         /* 3com boards. */
10370         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10371         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10372         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
10373         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10374         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10375
10376         /* DELL boards. */
10377         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10378         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10379         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10380         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10381
10382         /* Compaq boards. */
10383         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10384         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10385         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
10386         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10387         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10388
10389         /* IBM boards. */
10390         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10391 };
10392
10393 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10394 {
10395         int i;
10396
10397         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10398                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10399                      tp->pdev->subsystem_vendor) &&
10400                     (subsys_id_to_phy_id[i].subsys_devid ==
10401                      tp->pdev->subsystem_device))
10402                         return &subsys_id_to_phy_id[i];
10403         }
10404         return NULL;
10405 }
10406
10407 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10408 {
10409         u32 val;
10410         u16 pmcsr;
10411
10412         /* On some early chips the SRAM cannot be accessed in D3hot state,
10413          * so need make sure we're in D0.
10414          */
10415         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10416         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10417         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10418         msleep(1);
10419
10420         /* Make sure register accesses (indirect or otherwise)
10421          * will function correctly.
10422          */
10423         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10424                                tp->misc_host_ctrl);
10425
10426         /* The memory arbiter has to be enabled in order for SRAM accesses
10427          * to succeed.  Normally on powerup the tg3 chip firmware will make
10428          * sure it is enabled, but other entities such as system netboot
10429          * code might disable it.
10430          */
10431         val = tr32(MEMARB_MODE);
10432         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10433
10434         tp->phy_id = PHY_ID_INVALID;
10435         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10436
10437         /* Assume an onboard, WOL-capable device by default.  */
10438         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10439
10440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10441                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10442                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10443                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10444                 }
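                      /* The 5906 keeps these settings in the VCPU config
                       * shadow register rather than the SRAM config block,
                       * so pick up the ASPM and WOL state here and return.
                       */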
10445                 val = tr32(VCPU_CFGSHDW);
10446                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10447                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10448                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10449                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10450                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10451                 return;
10452         }
10453
10454         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10455         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10456                 u32 nic_cfg, led_cfg;
10457                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10458                 int eeprom_phy_serdes = 0;
10459
10460                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10461                 tp->nic_sram_data_cfg = nic_cfg;
10462
10463                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10464                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10465                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10466                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10467                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10468                     (ver > 0) && (ver < 0x100))
10469                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10470
10471                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10472                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10473                         eeprom_phy_serdes = 1;
10474
10475                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10476                 if (nic_phy_id != 0) {
10477                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10478                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10479
10480                         eeprom_phy_id  = (id1 >> 16) << 10;
10481                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10482                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10483                 } else
10484                         eeprom_phy_id = 0;
10485
10486                 tp->phy_id = eeprom_phy_id;
10487                 if (eeprom_phy_serdes) {
10488                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10489                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10490                         else
10491                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10492                 }
10493
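                      /* 5750 and later parts carry extended (Shasta) LED
                       * modes in cfg2 in addition to the basic LED modes
                       * found in nic_cfg.
                       */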
10494                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10495                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10496                                     SHASTA_EXT_LED_MODE_MASK);
10497                 else
10498                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10499
10500                 switch (led_cfg) {
10501                 default:
10502                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10503                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10504                         break;
10505
10506                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10507                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10508                         break;
10509
10510                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10511                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10512
10513                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10514                          * read on some older 5700/5701 bootcode.
10515                          */
10516                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10517                             ASIC_REV_5700 ||
10518                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10519                             ASIC_REV_5701)
10520                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10521
10522                         break;
10523
10524                 case SHASTA_EXT_LED_SHARED:
10525                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10526                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10527                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10528                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10529                                                  LED_CTRL_MODE_PHY_2);
10530                         break;
10531
10532                 case SHASTA_EXT_LED_MAC:
10533                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10534                         break;
10535
10536                 case SHASTA_EXT_LED_COMBO:
10537                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10538                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10539                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10540                                                  LED_CTRL_MODE_PHY_2);
10541                         break;
10542
10543                 }
10544
10545                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10546                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10547                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10548                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10549
10550                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10551                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10552                         if ((tp->pdev->subsystem_vendor ==
10553                              PCI_VENDOR_ID_ARIMA) &&
10554                             (tp->pdev->subsystem_device == 0x205a ||
10555                              tp->pdev->subsystem_device == 0x2063))
10556                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10557                 } else {
10558                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10559                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10560                 }
10561
10562                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10563                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10564                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10565                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10566                 }
10567                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10568                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10569                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10570                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10571                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10572
10573                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10574                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10575                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10576
10577                 if (cfg2 & (1 << 17))
10578                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10579
10580                 /* SerDes signal pre-emphasis in register 0x590 is set
10581                  * by the bootcode if bit 18 is set. */
10582                 if (cfg2 & (1 << 18))
10583                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10584
10585                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10586                         u32 cfg3;
10587
10588                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10589                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10590                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10591                 }
10592         }
10593 }
10594
10595 static int __devinit tg3_phy_probe(struct tg3 *tp)
10596 {
10597         u32 hw_phy_id_1, hw_phy_id_2;
10598         u32 hw_phy_id, hw_phy_id_masked;
10599         int err;
10600
10601         /* Reading the PHY ID register can conflict with ASF
10602          * firmware access to the PHY hardware.
10603          */
10604         err = 0;
10605         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10606             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10607                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10608         } else {
10609                 /* Now read the physical PHY_ID from the chip and verify
10610                  * that it is sane.  If it doesn't look good, we fall back
10611                  * to the PHY_ID found in the eeprom area or, failing
10612                  * that, the hard-coded subsystem device table.
10613                  */
10614                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10615                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10616
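                      /* Fold the two MII ID words into the driver's internal
                       * PHY ID layout so the masked result can be compared
                       * against the PHY_ID_* constants below.
                       */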
10617                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10618                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10619                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10620
10621                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10622         }
10623
10624         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10625                 tp->phy_id = hw_phy_id;
10626                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10627                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10628                 else
10629                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10630         } else {
10631                 if (tp->phy_id != PHY_ID_INVALID) {
10632                         /* Do nothing, phy ID already set up in
10633                          * tg3_get_eeprom_hw_cfg().
10634                          */
10635                 } else {
10636                         struct subsys_tbl_ent *p;
10637
10638                         /* No eeprom signature?  Try the hardcoded
10639                          * subsys device table.
10640                          */
10641                         p = lookup_by_subsys(tp);
10642                         if (!p)
10643                                 return -ENODEV;
10644
10645                         tp->phy_id = p->phy_id;
10646                         if (!tp->phy_id ||
10647                             tp->phy_id == PHY_ID_BCM8002)
10648                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10649                 }
10650         }
10651
10652         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10653             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10654             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10655                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10656
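                      /* The link status bit in BMSR is latched, so read the
                       * register twice to get its current value.
                       */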
10657                 tg3_readphy(tp, MII_BMSR, &bmsr);
10658                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10659                     (bmsr & BMSR_LSTATUS))
10660                         goto skip_phy_reset;
10661
10662                 err = tg3_phy_reset(tp);
10663                 if (err)
10664                         return err;
10665
10666                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10667                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10668                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10669                 tg3_ctrl = 0;
10670                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10671                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10672                                     MII_TG3_CTRL_ADV_1000_FULL);
10673                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10674                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10675                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10676                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10677                 }
10678
10679                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10680                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10681                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10682                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10683                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10684
10685                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10686                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10687
10688                         tg3_writephy(tp, MII_BMCR,
10689                                      BMCR_ANENABLE | BMCR_ANRESTART);
10690                 }
10691                 tg3_phy_set_wirespeed(tp);
10692
10693                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10694                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10695                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10696         }
10697
10698 skip_phy_reset:
10699         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10700                 err = tg3_init_5401phy_dsp(tp);
10701                 if (err)
10702                         return err;
10703         }
10704
10705         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10706                 err = tg3_init_5401phy_dsp(tp);
10707         }
10708
10709         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10710                 tp->link_config.advertising =
10711                         (ADVERTISED_1000baseT_Half |
10712                          ADVERTISED_1000baseT_Full |
10713                          ADVERTISED_Autoneg |
10714                          ADVERTISED_FIBRE);
10715         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10716                 tp->link_config.advertising &=
10717                         ~(ADVERTISED_1000baseT_Half |
10718                           ADVERTISED_1000baseT_Full);
10719
10720         return err;
10721 }
10722
10723 static void __devinit tg3_read_partno(struct tg3 *tp)
10724 {
10725         unsigned char vpd_data[256];
10726         unsigned int i;
10727         u32 magic;
10728
10729         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10730                 goto out_not_found;
10731
10732         if (magic == TG3_EEPROM_MAGIC) {
10733                 for (i = 0; i < 256; i += 4) {
10734                         u32 tmp;
10735
10736                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10737                                 goto out_not_found;
10738
10739                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10740                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10741                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10742                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10743                 }
10744         } else {
10745                 int vpd_cap;
10746
10747                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10748                 for (i = 0; i < 256; i += 4) {
10749                         u32 tmp, j = 0;
10750                         u16 tmp16;
10751
10752                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10753                                               i);
10754                         while (j++ < 100) {
10755                                 pci_read_config_word(tp->pdev, vpd_cap +
10756                                                      PCI_VPD_ADDR, &tmp16);
10757                                 if (tmp16 & 0x8000)
10758                                         break;
10759                                 msleep(1);
10760                         }
10761                         if (!(tmp16 & 0x8000))
10762                                 goto out_not_found;
10763
10764                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10765                                               &tmp);
10766                         tmp = cpu_to_le32(tmp);
10767                         memcpy(&vpd_data[i], &tmp, 4);
10768                 }
10769         }
10770
10771         /* Now parse and find the part number. */
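              /* The VPD area is a sequence of large-resource tags: 0x82
               * (identifier string) and 0x91 (read/write data) are skipped,
               * while the part number lives in the 0x90 (read-only data)
               * area as a 'PN' keyword followed by a length byte.
               */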
10772         for (i = 0; i < 254; ) {
10773                 unsigned char val = vpd_data[i];
10774                 unsigned int block_end;
10775
10776                 if (val == 0x82 || val == 0x91) {
10777                         i = (i + 3 +
10778                              (vpd_data[i + 1] +
10779                               (vpd_data[i + 2] << 8)));
10780                         continue;
10781                 }
10782
10783                 if (val != 0x90)
10784                         goto out_not_found;
10785
10786                 block_end = (i + 3 +
10787                              (vpd_data[i + 1] +
10788                               (vpd_data[i + 2] << 8)));
10789                 i += 3;
10790
10791                 if (block_end > 256)
10792                         goto out_not_found;
10793
10794                 while (i < (block_end - 2)) {
10795                         if (vpd_data[i + 0] == 'P' &&
10796                             vpd_data[i + 1] == 'N') {
10797                                 int partno_len = vpd_data[i + 2];
10798
10799                                 i += 3;
10800                                 if (partno_len > 24 || (partno_len + i) > 256)
10801                                         goto out_not_found;
10802
10803                                 memcpy(tp->board_part_number,
10804                                        &vpd_data[i], partno_len);
10805
10806                                 /* Success. */
10807                                 return;
10808                         }
10809                         i += 3 + vpd_data[i + 2];
10810                 }
10811
10812                 /* Part number not found. */
10813                 goto out_not_found;
10814         }
10815
10816 out_not_found:
10817         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10818                 strcpy(tp->board_part_number, "BCM95906");
10819         else
10820                 strcpy(tp->board_part_number, "none");
10821 }
10822
10823 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10824 {
10825         u32 val, offset, start;
10826
10827         if (tg3_nvram_read_swab(tp, 0, &val))
10828                 return;
10829
10830         if (val != TG3_EEPROM_MAGIC)
10831                 return;
10832
10833         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10834             tg3_nvram_read_swab(tp, 0x4, &start))
10835                 return;
10836
10837         offset = tg3_nvram_logical_addr(tp, offset);
10838         if (tg3_nvram_read_swab(tp, offset, &val))
10839                 return;
10840
10841         if ((val & 0xfc000000) == 0x0c000000) {
10842                 u32 ver_offset, addr;
10843                 int i;
10844
10845                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10846                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10847                         return;
10848
10849                 if (val != 0)
10850                         return;
10851
10852                 addr = offset + ver_offset - start;
10853                 for (i = 0; i < 16; i += 4) {
10854                         if (tg3_nvram_read(tp, addr + i, &val))
10855                                 return;
10856
10857                         val = cpu_to_le32(val);
10858                         memcpy(tp->fw_ver + i, &val, 4);
10859                 }
10860         }
10861 }
10862
10863 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10864
10865 static int __devinit tg3_get_invariants(struct tg3 *tp)
10866 {
10867         static struct pci_device_id write_reorder_chipsets[] = {
10868                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10869                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10870                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10871                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10872                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10873                              PCI_DEVICE_ID_VIA_8385_0) },
10874                 { },
10875         };
10876         u32 misc_ctrl_reg;
10877         u32 cacheline_sz_reg;
10878         u32 pci_state_reg, grc_misc_cfg;
10879         u32 val;
10880         u16 pci_cmd;
10881         int err, pcie_cap;
10882
10883         /* Force memory write invalidate off.  If we leave it on,
10884          * then on 5700_BX chips we have to enable a workaround.
10885          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10886          * to match the cacheline size.  The Broadcom driver has this
10887          * workaround but turns MWI off all the time, so it is never
10888          * used.  This seems to suggest that the workaround is insufficient.
10889          */
10890         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10891         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10892         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10893
10894         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10895          * has the register indirect write enable bit set before
10896          * we try to access any of the MMIO registers.  It is also
10897          * critical that the PCI-X hw workaround situation is decided
10898          * before that as well.
10899          */
10900         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10901                               &misc_ctrl_reg);
10902
10903         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10904                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10906                 u32 prod_id_asic_rev;
10907
10908                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10909                                       &prod_id_asic_rev);
10910                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10911         }
10912
10913         /* Wrong chip ID in 5752 A0. This code can be removed later
10914          * as A0 is not in production.
10915          */
10916         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10917                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10918
10919         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10920          * we need to disable memory and use config. cycles
10921          * only to access all registers. The 5702/03 chips
10922          * can mistakenly decode the special cycles from the
10923          * ICH chipsets as memory write cycles, causing corruption
10924          * of register and memory space. Only certain ICH bridges
10925          * will drive special cycles with non-zero data during the
10926          * address phase which can fall within the 5703's address
10927          * range. This is not an ICH bug as the PCI spec allows
10928          * non-zero address during special cycles. However, only
10929          * these ICH bridges are known to drive non-zero addresses
10930          * during special cycles.
10931          *
10932          * Since special cycles do not cross PCI bridges, we only
10933          * enable this workaround if the 5703 is on the secondary
10934          * bus of these ICH bridges.
10935          */
10936         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10937             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10938                 static struct tg3_dev_id {
10939                         u32     vendor;
10940                         u32     device;
10941                         u32     rev;
10942                 } ich_chipsets[] = {
10943                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10944                           PCI_ANY_ID },
10945                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10946                           PCI_ANY_ID },
10947                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10948                           0xa },
10949                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10950                           PCI_ANY_ID },
10951                         { },
10952                 };
10953                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10954                 struct pci_dev *bridge = NULL;
10955
10956                 while (pci_id->vendor != 0) {
10957                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10958                                                 bridge);
10959                         if (!bridge) {
10960                                 pci_id++;
10961                                 continue;
10962                         }
10963                         if (pci_id->rev != PCI_ANY_ID) {
10964                                 if (bridge->revision > pci_id->rev)
10965                                         continue;
10966                         }
10967                         if (bridge->subordinate &&
10968                             (bridge->subordinate->number ==
10969                              tp->pdev->bus->number)) {
10970
10971                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10972                                 pci_dev_put(bridge);
10973                                 break;
10974                         }
10975                 }
10976         }
10977
10978         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10979          * DMA addresses > 40-bit. This bridge may have other additional
10980          * 57xx devices behind it in some 4-port NIC designs for example.
10981          * Any tg3 device found behind the bridge will also need the 40-bit
10982          * DMA workaround.
10983          */
10984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10986                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10987                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10988                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10989         }
10990         else {
10991                 struct pci_dev *bridge = NULL;
10992
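                      /* Look for a ServerWorks EPB bridge whose secondary
                       * bus range covers this device; any tg3 behind such a
                       * bridge needs the 40-bit DMA workaround as well.
                       */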
10993                 do {
10994                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10995                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10996                                                 bridge);
10997                         if (bridge && bridge->subordinate &&
10998                             (bridge->subordinate->number <=
10999                              tp->pdev->bus->number) &&
11000                             (bridge->subordinate->subordinate >=
11001                              tp->pdev->bus->number)) {
11002                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11003                                 pci_dev_put(bridge);
11004                                 break;
11005                         }
11006                 } while (bridge);
11007         }
11008
11009         /* Initialize misc host control in PCI block. */
11010         tp->misc_host_ctrl |= (misc_ctrl_reg &
11011                                MISC_HOST_CTRL_CHIPREV);
11012         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11013                                tp->misc_host_ctrl);
11014
11015         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11016                               &cacheline_sz_reg);
11017
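              /* This config dword packs the standard PCI cache line size,
               * latency timer, header type and BIST bytes.
               */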
11018         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11019         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11020         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11021         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11022
11023         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11024             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11025                 tp->pdev_peer = tg3_find_peer(tp);
11026
11027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11028             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11029             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11031             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11032             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11033             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11034             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11035                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11036
11037         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11038             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11039                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11040
11041         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11042                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
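                      /* MSI cannot be used on 5750 AX/BX steppings, nor on
                       * 5714 revisions up to A2 that have no peer function,
                       * so clear the flag again in those cases.
                       */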
11043                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11044                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11045                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11046                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11047                      tp->pdev_peer == tp->pdev))
11048                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11049
11050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11051                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11052                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11053                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11054                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11055                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11056                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11057                 } else {
11058                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11059                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11060                                 ASIC_REV_5750 &&
11061                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11062                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11063                 }
11064         }
11065
11066         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11067             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11068             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11069             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11070             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11071             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11072             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11073             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11074                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11075
11076         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11077         if (pcie_cap != 0) {
11078                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11079                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11080                         u16 lnkctl;
11081
11082                         pci_read_config_word(tp->pdev,
11083                                              pcie_cap + PCI_EXP_LNKCTL,
11084                                              &lnkctl);
11085                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11086                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11087                 }
11088         }
11089
11090         /* If we have an AMD 762 or VIA K8T800 chipset, write
11091          * reordering to the mailbox registers done by the host
11092          * controller can cause major troubles.  We read back from
11093          * every mailbox register write to force the writes to be
11094          * posted to the chip in order.
11095          */
11096         if (pci_dev_present(write_reorder_chipsets) &&
11097             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11098                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11099
11100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11101             tp->pci_lat_timer < 64) {
11102                 tp->pci_lat_timer = 64;
11103
11104                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11105                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11106                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11107                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11108
11109                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11110                                        cacheline_sz_reg);
11111         }
11112
11113         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11114             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11115                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11116                 if (!tp->pcix_cap) {
11117                         printk(KERN_ERR PFX "Cannot find PCI-X "
11118                                             "capability, aborting.\n");
11119                         return -EIO;
11120                 }
11121         }
11122
11123         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11124                               &pci_state_reg);
11125
11126         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11127                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11128
11129                 /* If this is a 5700 BX chipset, and we are in PCI-X
11130                  * mode, enable register write workaround.
11131                  *
11132                  * The workaround is to use indirect register accesses
11133                  * for all chip writes not to mailbox registers.
11134                  */
11135                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11136                         u32 pm_reg;
11137
11138                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11139
11140                         /* The chip can have its power management PCI config
11141                          * space registers clobbered due to this bug.
11142                          * So explicitly force the chip into D0 here.
11143                          */
11144                         pci_read_config_dword(tp->pdev,
11145                                               tp->pm_cap + PCI_PM_CTRL,
11146                                               &pm_reg);
11147                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11148                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11149                         pci_write_config_dword(tp->pdev,
11150                                                tp->pm_cap + PCI_PM_CTRL,
11151                                                pm_reg);
11152
11153                         /* Also, force SERR#/PERR# in PCI command. */
11154                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11155                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11156                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11157                 }
11158         }
11159
11160         /* 5700 BX chips need to have their TX producer index mailboxes
11161          * written twice to work around a bug.
11162          */
11163         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11164                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11165
11166         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11167                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11168         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11169                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11170
11171         /* Chip-specific fixup from Broadcom driver */
11172         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11173             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11174                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11175                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11176         }
11177
11178         /* Default fast path register access methods */
11179         tp->read32 = tg3_read32;
11180         tp->write32 = tg3_write32;
11181         tp->read32_mbox = tg3_read32;
11182         tp->write32_mbox = tg3_write32;
11183         tp->write32_tx_mbox = tg3_write32;
11184         tp->write32_rx_mbox = tg3_write32;
11185
11186         /* Various workaround register access methods */
11187         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11188                 tp->write32 = tg3_write_indirect_reg32;
11189         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11190                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11191                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11192                 /*
11193                  * Back to back register writes can cause problems on these
11194                  * chips, the workaround is to read back all reg writes
11195                  * except those to mailbox regs.
11196                  *
11197                  * See tg3_write_indirect_reg32().
11198                  */
11199                 tp->write32 = tg3_write_flush_reg32;
11200         }
11201
11202
11203         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11204             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11205                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11206                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11207                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11208         }
11209
11210         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11211                 tp->read32 = tg3_read_indirect_reg32;
11212                 tp->write32 = tg3_write_indirect_reg32;
11213                 tp->read32_mbox = tg3_read_indirect_mbox;
11214                 tp->write32_mbox = tg3_write_indirect_mbox;
11215                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11216                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11217
11218                 iounmap(tp->regs);
11219                 tp->regs = NULL;
11220
11221                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11222                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11223                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11224         }
11225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11226                 tp->read32_mbox = tg3_read32_mbox_5906;
11227                 tp->write32_mbox = tg3_write32_mbox_5906;
11228                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11229                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11230         }
11231
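              /* Use PCI config cycles for NIC SRAM accesses whenever plain
               * MMIO register writes cannot be trusted on this chip/bus
               * combination.
               */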
11232         if (tp->write32 == tg3_write_indirect_reg32 ||
11233             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11234              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11235               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11236                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11237
11238         /* Get eeprom hw config before calling tg3_set_power_state().
11239          * In particular, the TG3_FLG2_IS_NIC flag must be
11240          * determined before calling tg3_set_power_state() so that
11241          * we know whether or not to switch out of Vaux power.
11242          * When the flag is set, it means that GPIO1 is used for eeprom
11243          * write protect and also implies that it is a LOM where GPIOs
11244          * are not used to switch power.
11245          */
11246         tg3_get_eeprom_hw_cfg(tp);
11247
11248         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11249                 /* Allow reads and writes to the
11250                  * APE register and memory space.
11251                  */
11252                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11253                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11254                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11255                                        pci_state_reg);
11256         }
11257
11258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11260                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11261
11262         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11263          * GPIO1 driven high will bring 5700's external PHY out of reset.
11264          * It is also used as eeprom write protect on LOMs.
11265          */
11266         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11267         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11268             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11269                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11270                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11271         /* Unused GPIO3 must be driven as output on 5752 because there
11272          * are no pull-up resistors on unused GPIO pins.
11273          */
11274         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11275                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11276
11277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11278                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11279
11280         /* Force the chip into D0. */
11281         err = tg3_set_power_state(tp, PCI_D0);
11282         if (err) {
11283                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11284                        pci_name(tp->pdev));
11285                 return err;
11286         }
11287
11288         /* 5700 B0 chips do not support checksumming correctly due
11289          * to hardware bugs.
11290          */
11291         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11292                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11293
11294         /* Derive initial jumbo mode from MTU assigned in
11295          * ether_setup() via the alloc_etherdev() call
11296          */
11297         if (tp->dev->mtu > ETH_DATA_LEN &&
11298             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11299                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11300
11301         /* Determine WakeOnLan speed to use. */
11302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11303             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11304             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11305             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11306                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11307         } else {
11308                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11309         }
11310
11311         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
11312         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11313             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11314              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11315              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11316             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11317             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11318                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11319
11320         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11321             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11322                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11323         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11324                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11325
11326         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11327                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11328                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11329                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11330                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11331                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11332                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11333                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11334                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11335                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11336                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11337                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11338         }
11339
11340         tp->coalesce_mode = 0;
11341         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11342             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11343                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11344
11345         /* Initialize MAC MI mode, polling disabled. */
11346         tw32_f(MAC_MI_MODE, tp->mi_mode);
11347         udelay(80);
11348
11349         /* Initialize data/descriptor byte/word swapping. */
11350         val = tr32(GRC_MODE);
11351         val &= GRC_MODE_HOST_STACKUP;
11352         tw32(GRC_MODE, val | tp->grc_mode);
11353
11354         tg3_switch_clocks(tp);
11355
11356         /* Clear this out for sanity. */
11357         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11358
11359         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11360                               &pci_state_reg);
11361         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11362             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11363                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11364
11365                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11366                     chiprevid == CHIPREV_ID_5701_B0 ||
11367                     chiprevid == CHIPREV_ID_5701_B2 ||
11368                     chiprevid == CHIPREV_ID_5701_B5) {
11369                         void __iomem *sram_base;
11370
11371                         /* Write some dummy words into the SRAM status block
11372                          * area and see if they read back correctly.  If the
11373                          * readback is bad, force-enable the PCI-X workaround.
11374                          */
11375                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11376
11377                         writel(0x00000000, sram_base);
11378                         writel(0x00000000, sram_base + 4);
11379                         writel(0xffffffff, sram_base + 4);
11380                         if (readl(sram_base) != 0x00000000)
11381                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11382                 }
11383         }
11384
11385         udelay(50);
11386         tg3_nvram_init(tp);
11387
11388         grc_misc_cfg = tr32(GRC_MISC_CFG);
11389         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11390
11391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11392             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11393              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11394                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11395
11396         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11397             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11398                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11399         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11400                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11401                                       HOSTCC_MODE_CLRTICK_TXBD);
11402
11403                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11404                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11405                                        tp->misc_host_ctrl);
11406         }
11407
11408         /* these are limited to 10/100 only */
11409         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11410              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11411             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11412              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11413              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11414               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11415               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11416             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11417              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11418               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11419               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11420             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11421                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11422
11423         err = tg3_phy_probe(tp);
11424         if (err) {
11425                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11426                        pci_name(tp->pdev), err);
11427                 /* ... but do not return immediately ... */
11428         }
11429
11430         tg3_read_partno(tp);
11431         tg3_read_fw_ver(tp);
11432
11433         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11434                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11435         } else {
11436                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11437                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11438                 else
11439                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11440         }
11441
11442         /* 5700 {AX,BX} chips have a broken status block link
11443          * change bit implementation, so we must use the
11444          * status register in those cases.
11445          */
11446         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11447                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11448         else
11449                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11450
11451         /* The led_ctrl is set during tg3_phy_probe; here we might
11452          * have to force the link status polling mechanism based
11453          * upon subsystem IDs.
11454          */
11455         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11456             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11457             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11458                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11459                                   TG3_FLAG_USE_LINKCHG_REG);
11460         }
11461
11462         /* For all SERDES we poll the MAC status register. */
11463         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11464                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11465         else
11466                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11467
11468         /* All chips before 5787 can get confused if TX buffers
11469          * straddle the 4GB address boundary in some cases.
11470          */
11471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11474             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11476                 tp->dev->hard_start_xmit = tg3_start_xmit;
11477         else
11478                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11479
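              /* Keep a 2-byte offset so the IP header lands on a 4-byte
               * boundary; the 5701 in PCI-X mode has to run without it.
               */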
11480         tp->rx_offset = 2;
11481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11482             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11483                 tp->rx_offset = 0;
11484
11485         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11486
11487         /* Increment the rx prod index on the rx std ring by at most
11488          * 8 for these chips to work around hw errata.
11489          */
11490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11492             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11493                 tp->rx_std_max_post = 8;
11494
11495         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11496                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11497                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11498
11499         return err;
11500 }
11501
11502 #ifdef CONFIG_SPARC
11503 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11504 {
11505         struct net_device *dev = tp->dev;
11506         struct pci_dev *pdev = tp->pdev;
11507         struct device_node *dp = pci_device_to_OF_node(pdev);
11508         const unsigned char *addr;
11509         int len;
11510
11511         addr = of_get_property(dp, "local-mac-address", &len);
11512         if (addr && len == 6) {
11513                 memcpy(dev->dev_addr, addr, 6);
11514                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11515                 return 0;
11516         }
11517         return -ENODEV;
11518 }
11519
11520 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11521 {
11522         struct net_device *dev = tp->dev;
11523
11524         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11525         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11526         return 0;
11527 }
11528 #endif
11529
11530 static int __devinit tg3_get_device_address(struct tg3 *tp)
11531 {
11532         struct net_device *dev = tp->dev;
11533         u32 hi, lo, mac_offset;
11534         int addr_ok = 0;
11535
11536 #ifdef CONFIG_SPARC
11537         if (!tg3_get_macaddr_sparc(tp))
11538                 return 0;
11539 #endif
11540
11541         mac_offset = 0x7c;
11542         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11543             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11544                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11545                         mac_offset = 0xcc;
11546                 if (tg3_nvram_lock(tp))
11547                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11548                 else
11549                         tg3_nvram_unlock(tp);
11550         }
11551         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11552                 mac_offset = 0x10;
11553
11554         /* First try to get it from MAC address mailbox. */
11555         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
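              /* The MAC address mailbox is treated as valid only when the
               * upper 16 bits of the high word carry the ASCII signature
               * "HK" (0x484b).
               */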
11556         if ((hi >> 16) == 0x484b) {
11557                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11558                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11559
11560                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11561                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11562                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11563                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11564                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11565
11566                 /* Some old bootcode may report a 0 MAC address in SRAM */
11567                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11568         }
11569         if (!addr_ok) {
11570                 /* Next, try NVRAM. */
11571                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11572                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11573                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11574                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11575                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11576                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11577                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11578                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11579                 }
11580                 /* Finally just fetch it out of the MAC control regs. */
11581                 else {
11582                         hi = tr32(MAC_ADDR_0_HIGH);
11583                         lo = tr32(MAC_ADDR_0_LOW);
11584
11585                         dev->dev_addr[5] = lo & 0xff;
11586                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11587                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11588                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11589                         dev->dev_addr[1] = hi & 0xff;
11590                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11591                 }
11592         }
11593
11594         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11595 #ifdef CONFIG_SPARC
11596                 if (!tg3_get_default_macaddr_sparc(tp))
11597                         return 0;
11598 #endif
11599                 return -EINVAL;
11600         }
11601         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11602         return 0;
11603 }
11604
11605 #define BOUNDARY_SINGLE_CACHELINE       1
11606 #define BOUNDARY_MULTI_CACHELINE        2
11607
11608 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11609 {
11610         int cacheline_size;
11611         u8 byte;
11612         int goal;
11613
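              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
               * zero usually means it was never programmed, so assume a
               * large 1024-byte line in that case.
               */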
11614         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11615         if (byte == 0)
11616                 cacheline_size = 1024;
11617         else
11618                 cacheline_size = (int) byte * 4;
11619
11620         /* On 5703 and later chips, the boundary bits have no
11621          * effect.
11622          */
11623         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11624             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11625             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11626                 goto out;
11627
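              /* Pick a boundary goal based on how the host PCI controller is
               * known to handle bursts that cross cache lines; see the longer
               * comment below.
               */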
11628 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11629         goal = BOUNDARY_MULTI_CACHELINE;
11630 #else
11631 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11632         goal = BOUNDARY_SINGLE_CACHELINE;
11633 #else
11634         goal = 0;
11635 #endif
11636 #endif
11637
11638         if (!goal)
11639                 goto out;
11640
11641         /* PCI controllers on most RISC systems tend to disconnect
11642          * when a device tries to burst across a cache-line boundary.
11643          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11644          *
11645          * Unfortunately, for PCI-E there are only limited
11646          * write-side controls for this, and thus for reads
11647          * we will still get the disconnects.  We'll also waste
11648          * these PCI cycles for both read and write on chips
11649          * other than 5700 and 5701, which do not implement the
11650          * boundary bits.
11651          */
11652         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11653             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11654                 switch (cacheline_size) {
11655                 case 16:
11656                 case 32:
11657                 case 64:
11658                 case 128:
11659                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11660                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11661                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11662                         } else {
11663                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11664                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11665                         }
11666                         break;
11667
11668                 case 256:
11669                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11670                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11671                         break;
11672
11673                 default:
11674                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11675                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11676                         break;
11677                 }
11678         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11679                 switch (cacheline_size) {
11680                 case 16:
11681                 case 32:
11682                 case 64:
11683                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11684                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11685                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11686                                 break;
11687                         }
11688                         /* fallthrough */
11689                 case 128:
11690                 default:
11691                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11692                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11693                         break;
11694                 }
11695         } else {
11696                 switch (cacheline_size) {
11697                 case 16:
11698                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11699                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11700                                         DMA_RWCTRL_WRITE_BNDRY_16);
11701                                 break;
11702                         }
11703                         /* fallthrough */
11704                 case 32:
11705                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11706                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11707                                         DMA_RWCTRL_WRITE_BNDRY_32);
11708                                 break;
11709                         }
11710                         /* fallthrough */
11711                 case 64:
11712                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11713                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11714                                         DMA_RWCTRL_WRITE_BNDRY_64);
11715                                 break;
11716                         }
11717                         /* fallthrough */
11718                 case 128:
11719                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11720                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11721                                         DMA_RWCTRL_WRITE_BNDRY_128);
11722                                 break;
11723                         }
11724                         /* fallthrough */
11725                 case 256:
11726                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11727                                 DMA_RWCTRL_WRITE_BNDRY_256);
11728                         break;
11729                 case 512:
11730                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11731                                 DMA_RWCTRL_WRITE_BNDRY_512);
11732                         break;
11733                 case 1024:
11734                 default:
11735                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11736                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11737                         break;
11738                 }
11739         }
11740
11741 out:
11742         return val;
11743 }
11744
11745 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11746 {
11747         struct tg3_internal_buffer_desc test_desc;
11748         u32 sram_dma_descs;
11749         int i, ret;
11750
11751         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11752
11753         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11754         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11755         tw32(RDMAC_STATUS, 0);
11756         tw32(WDMAC_STATUS, 0);
11757
11758         tw32(BUFMGR_MODE, 0);
11759         tw32(FTQ_RESET, 0);
11760
11761         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11762         test_desc.addr_lo = buf_dma & 0xffffffff;
11763         test_desc.nic_mbuf = 0x00002100;
11764         test_desc.len = size;
11765
11766         /*
11767          * HP ZX1 systems were seeing DMA test failures with 5701 cards
11768          * running at 33MHz the *second* time the tg3 driver was loaded
11769          * after an initial scan.
11770          *
11771          * Broadcom tells me:
11772          *   ...the DMA engine is connected to the GRC block and a DMA
11773          *   reset may affect the GRC block in some unpredictable way...
11774          *   The behavior of resets to individual blocks has not been tested.
11775          *
11776          * Broadcom noted the GRC reset will also reset all sub-components.
11777          */
11778         if (to_device) {
11779                 test_desc.cqid_sqid = (13 << 8) | 2;
11780
11781                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11782                 udelay(40);
11783         } else {
11784                 test_desc.cqid_sqid = (16 << 8) | 7;
11785
11786                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11787                 udelay(40);
11788         }
11789         test_desc.flags = 0x00000005;
11790
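              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time, using the indirect memory window in PCI config space.
               */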
11791         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11792                 u32 val;
11793
11794                 val = *(((u32 *)&test_desc) + i);
11795                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11796                                        sram_dma_descs + (i * sizeof(u32)));
11797                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11798         }
11799         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11800
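              /* Kick off the transfer by enqueueing the descriptor address on
               * the high priority read (host-to-device) or write
               * (device-to-host) DMA flow-through queue.
               */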
11801         if (to_device) {
11802                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11803         } else {
11804                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11805         }
11806
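              /* Poll the matching completion FIFO for up to ~4ms, waiting for
               * the descriptor address to show up there, which indicates the
               * DMA has finished.
               */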
11807         ret = -ENODEV;
11808         for (i = 0; i < 40; i++) {
11809                 u32 val;
11810
11811                 if (to_device)
11812                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11813                 else
11814                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11815                 if ((val & 0xffff) == sram_dma_descs) {
11816                         ret = 0;
11817                         break;
11818                 }
11819
11820                 udelay(100);
11821         }
11822
11823         return ret;
11824 }
11825
11826 #define TEST_BUFFER_SIZE        0x2000
11827
11828 static int __devinit tg3_test_dma(struct tg3 *tp)
11829 {
11830         dma_addr_t buf_dma;
11831         u32 *buf, saved_dma_rwctrl;
11832         int ret;
11833
11834         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11835         if (!buf) {
11836                 ret = -ENOMEM;
11837                 goto out_nofree;
11838         }
11839
11840         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11841                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11842
11843         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11844
11845         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11846                 /* DMA read watermark not used on PCIE */
11847                 tp->dma_rwctrl |= 0x00180000;
11848         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11850                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11851                         tp->dma_rwctrl |= 0x003f0000;
11852                 else
11853                         tp->dma_rwctrl |= 0x003f000f;
11854         } else {
11855                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11856                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11857                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11858                         u32 read_water = 0x7;
11859
11860                         /* If the 5704 is behind the EPB bridge, we can
11861                          * do the less restrictive ONE_DMA workaround for
11862                          * better performance.
11863                          */
11864                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11865                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11866                                 tp->dma_rwctrl |= 0x8000;
11867                         else if (ccval == 0x6 || ccval == 0x7)
11868                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11869
11870                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11871                                 read_water = 4;
11872                         /* Set bit 23 to enable PCIX hw bug fix */
11873                         tp->dma_rwctrl |=
11874                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11875                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11876                                 (1 << 23);
11877                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11878                         /* 5780 always in PCIX mode */
11879                         tp->dma_rwctrl |= 0x00144000;
11880                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11881                         /* 5714 always in PCIX mode */
11882                         tp->dma_rwctrl |= 0x00148000;
11883                 } else {
11884                         tp->dma_rwctrl |= 0x001b000f;
11885                 }
11886         }
11887
11888         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11890                 tp->dma_rwctrl &= 0xfffffff0;
11891
11892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11894                 /* Remove this if it causes problems for some boards. */
11895                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11896
11897                 /* On 5700/5701 chips, we need to set this bit.
11898                  * Otherwise the chip will issue cacheline transactions
11899                  * to streamable DMA memory without all of the byte
11900                  * enables asserted.  This is an error on several
11901                  * RISC PCI controllers, in particular sparc64.
11902                  *
11903                  * On 5703/5704 chips, this bit has been reassigned
11904                  * a different meaning.  In particular, it is used
11905                  * on those chips to enable a PCI-X workaround.
11906                  */
11907                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11908         }
11909
11910         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11911
11912 #if 0
11913         /* Unneeded, already done by tg3_get_invariants.  */
11914         tg3_switch_clocks(tp);
11915 #endif
11916
11917         ret = 0;
11918         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11919             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11920                 goto out;
11921
11922         /* It is best to perform the DMA test with the maximum write burst
11923          * size to expose the 5700/5701 write DMA bug.
11924          */
11925         saved_dma_rwctrl = tp->dma_rwctrl;
11926         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11927         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11928
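              /* Fill the buffer with a known pattern, DMA it to the chip and
               * back again, and verify the result.  On corruption, retry once
               * with the conservative 16-byte write boundary before giving up.
               */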
11929         while (1) {
11930                 u32 *p = buf, i;
11931
11932                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11933                         p[i] = i;
11934
11935                 /* Send the buffer to the chip. */
11936                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11937                 if (ret) {
11938                         printk(KERN_ERR "tg3_test_dma() DMA write of test buffer failed, err = %d\n", ret);
11939                         break;
11940                 }
11941
11942 #if 0
11943                 /* validate data reached card RAM correctly. */
11944                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11945                         u32 val;
11946                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11947                         if (le32_to_cpu(val) != p[i]) {
11948                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
11949                                 /* ret = -ENODEV here? */
11950                         }
11951                         p[i] = 0;
11952                 }
11953 #endif
11954                 /* Now read it back. */
11955                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11956                 if (ret) {
11957                         printk(KERN_ERR "tg3_test_dma() DMA read of test buffer failed, err = %d\n", ret);
11958
11959                         break;
11960                 }
11961
11962                 /* Verify it. */
11963                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11964                         if (p[i] == i)
11965                                 continue;
11966
11967                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11968                             DMA_RWCTRL_WRITE_BNDRY_16) {
11969                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11970                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11971                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11972                                 break;
11973                         } else {
11974                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11975                                 ret = -ENODEV;
11976                                 goto out;
11977                         }
11978                 }
11979
11980                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11981                         /* Success. */
11982                         ret = 0;
11983                         break;
11984                 }
11985         }
11986         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11987             DMA_RWCTRL_WRITE_BNDRY_16) {
11988                 static struct pci_device_id dma_wait_state_chipsets[] = {
11989                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11990                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11991                         { },
11992                 };
11993
11994                 /* DMA test passed without adjusting DMA boundary,
11995                  * now look for chipsets that are known to expose the
11996                  * DMA bug without failing the test.
11997                  */
11998                 if (pci_dev_present(dma_wait_state_chipsets)) {
11999                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12000                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12001                 } else {
12002                         /* Safe to use the calculated DMA boundary. */
12003                         tp->dma_rwctrl = saved_dma_rwctrl;
12004                 }
12005
12006                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12007         }
12008
12009 out:
12010         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12011 out_nofree:
12012         return ret;
12013 }
12014
12015 static void __devinit tg3_init_link_config(struct tg3 *tp)
12016 {
12017         tp->link_config.advertising =
12018                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12019                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12020                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12021                  ADVERTISED_Autoneg | ADVERTISED_MII);
12022         tp->link_config.speed = SPEED_INVALID;
12023         tp->link_config.duplex = DUPLEX_INVALID;
12024         tp->link_config.autoneg = AUTONEG_ENABLE;
12025         tp->link_config.active_speed = SPEED_INVALID;
12026         tp->link_config.active_duplex = DUPLEX_INVALID;
12027         tp->link_config.phy_is_low_power = 0;
12028         tp->link_config.orig_speed = SPEED_INVALID;
12029         tp->link_config.orig_duplex = DUPLEX_INVALID;
12030         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12031 }
12032
12033 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12034 {
12035         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12036                 tp->bufmgr_config.mbuf_read_dma_low_water =
12037                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12038                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12039                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12040                 tp->bufmgr_config.mbuf_high_water =
12041                         DEFAULT_MB_HIGH_WATER_5705;
12042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12043                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12044                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12045                         tp->bufmgr_config.mbuf_high_water =
12046                                 DEFAULT_MB_HIGH_WATER_5906;
12047                 }
12048
12049                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12050                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12051                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12052                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12053                 tp->bufmgr_config.mbuf_high_water_jumbo =
12054                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12055         } else {
12056                 tp->bufmgr_config.mbuf_read_dma_low_water =
12057                         DEFAULT_MB_RDMA_LOW_WATER;
12058                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12059                         DEFAULT_MB_MACRX_LOW_WATER;
12060                 tp->bufmgr_config.mbuf_high_water =
12061                         DEFAULT_MB_HIGH_WATER;
12062
12063                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12064                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12065                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12066                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12067                 tp->bufmgr_config.mbuf_high_water_jumbo =
12068                         DEFAULT_MB_HIGH_WATER_JUMBO;
12069         }
12070
12071         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12072         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12073 }
12074
12075 static char * __devinit tg3_phy_string(struct tg3 *tp)
12076 {
12077         switch (tp->phy_id & PHY_ID_MASK) {
12078         case PHY_ID_BCM5400:    return "5400";
12079         case PHY_ID_BCM5401:    return "5401";
12080         case PHY_ID_BCM5411:    return "5411";
12081         case PHY_ID_BCM5701:    return "5701";
12082         case PHY_ID_BCM5703:    return "5703";
12083         case PHY_ID_BCM5704:    return "5704";
12084         case PHY_ID_BCM5705:    return "5705";
12085         case PHY_ID_BCM5750:    return "5750";
12086         case PHY_ID_BCM5752:    return "5752";
12087         case PHY_ID_BCM5714:    return "5714";
12088         case PHY_ID_BCM5780:    return "5780";
12089         case PHY_ID_BCM5755:    return "5755";
12090         case PHY_ID_BCM5787:    return "5787";
12091         case PHY_ID_BCM5784:    return "5784";
12092         case PHY_ID_BCM5756:    return "5722/5756";
12093         case PHY_ID_BCM5906:    return "5906";
12094         case PHY_ID_BCM5761:    return "5761";
12095         case PHY_ID_BCM8002:    return "8002/serdes";
12096         case 0:                 return "serdes";
12097         default:                return "unknown";
12098         }
12099 }
12100
12101 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12102 {
12103         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12104                 strcpy(str, "PCI Express");
12105                 return str;
12106         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12107                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12108
12109                 strcpy(str, "PCIX:");
12110
12111                 if ((clock_ctrl == 7) ||
12112                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12113                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12114                         strcat(str, "133MHz");
12115                 else if (clock_ctrl == 0)
12116                         strcat(str, "33MHz");
12117                 else if (clock_ctrl == 2)
12118                         strcat(str, "50MHz");
12119                 else if (clock_ctrl == 4)
12120                         strcat(str, "66MHz");
12121                 else if (clock_ctrl == 6)
12122                         strcat(str, "100MHz");
12123         } else {
12124                 strcpy(str, "PCI:");
12125                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12126                         strcat(str, "66MHz");
12127                 else
12128                         strcat(str, "33MHz");
12129         }
12130         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12131                 strcat(str, ":32-bit");
12132         else
12133                 strcat(str, ":64-bit");
12134         return str;
12135 }
12136
12137 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12138 {
12139         struct pci_dev *peer;
12140         unsigned int func, devnr = tp->pdev->devfn & ~7;
12141
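              /* Scan the other PCI functions in this slot looking for the
               * companion port of a dual-port (e.g. 5704) device.
               */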
12142         for (func = 0; func < 8; func++) {
12143                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12144                 if (peer && peer != tp->pdev)
12145                         break;
12146                 pci_dev_put(peer);
12147         }
12148         /* The 5704 can be configured in single-port mode; set peer to
12149          * tp->pdev in that case.
12150          */
12151         if (!peer) {
12152                 peer = tp->pdev;
12153                 return peer;
12154         }
12155
12156         /*
12157          * We don't need to keep the refcount elevated; there's no way
12158          * to remove one half of this device without removing the other.
12159          */
12160         pci_dev_put(peer);
12161
12162         return peer;
12163 }
12164
12165 static void __devinit tg3_init_coal(struct tg3 *tp)
12166 {
12167         struct ethtool_coalesce *ec = &tp->coal;
12168
12169         memset(ec, 0, sizeof(*ec));
12170         ec->cmd = ETHTOOL_GCOALESCE;
12171         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12172         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12173         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12174         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12175         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12176         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12177         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12178         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12179         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12180
12181         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12182                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12183                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12184                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12185                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12186                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12187         }
12188
12189         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12190                 ec->rx_coalesce_usecs_irq = 0;
12191                 ec->tx_coalesce_usecs_irq = 0;
12192                 ec->stats_block_coalesce_usecs = 0;
12193         }
12194 }
12195
12196 static int __devinit tg3_init_one(struct pci_dev *pdev,
12197                                   const struct pci_device_id *ent)
12198 {
12199         static int tg3_version_printed;
12200         unsigned long tg3reg_base, tg3reg_len;
12201         struct net_device *dev;
12202         struct tg3 *tp;
12203         int i, err, pm_cap;
12204         char str[40];
12205         u64 dma_mask, persist_dma_mask;
12206
12207         if (tg3_version_printed++ == 0)
12208                 printk(KERN_INFO "%s", version);
12209
12210         err = pci_enable_device(pdev);
12211         if (err) {
12212                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12213                        "aborting.\n");
12214                 return err;
12215         }
12216
12217         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12218                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12219                        "base address, aborting.\n");
12220                 err = -ENODEV;
12221                 goto err_out_disable_pdev;
12222         }
12223
12224         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12225         if (err) {
12226                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12227                        "aborting.\n");
12228                 goto err_out_disable_pdev;
12229         }
12230
12231         pci_set_master(pdev);
12232
12233         /* Find power-management capability. */
12234         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12235         if (pm_cap == 0) {
12236                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12237                        "aborting.\n");
12238                 err = -EIO;
12239                 goto err_out_free_res;
12240         }
12241
12242         tg3reg_base = pci_resource_start(pdev, 0);
12243         tg3reg_len = pci_resource_len(pdev, 0);
12244
12245         dev = alloc_etherdev(sizeof(*tp));
12246         if (!dev) {
12247                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12248                 err = -ENOMEM;
12249                 goto err_out_free_res;
12250         }
12251
12252         SET_NETDEV_DEV(dev, &pdev->dev);
12253
12254 #if TG3_VLAN_TAG_USED
12255         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12256         dev->vlan_rx_register = tg3_vlan_rx_register;
12257 #endif
12258
12259         tp = netdev_priv(dev);
12260         tp->pdev = pdev;
12261         tp->dev = dev;
12262         tp->pm_cap = pm_cap;
12263         tp->mac_mode = TG3_DEF_MAC_MODE;
12264         tp->rx_mode = TG3_DEF_RX_MODE;
12265         tp->tx_mode = TG3_DEF_TX_MODE;
12266         tp->mi_mode = MAC_MI_MODE_BASE;
12267         if (tg3_debug > 0)
12268                 tp->msg_enable = tg3_debug;
12269         else
12270                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12271
12272         /* The word/byte swap controls here control register access byte
12273          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12274          * setting below.
12275          */
12276         tp->misc_host_ctrl =
12277                 MISC_HOST_CTRL_MASK_PCI_INT |
12278                 MISC_HOST_CTRL_WORD_SWAP |
12279                 MISC_HOST_CTRL_INDIR_ACCESS |
12280                 MISC_HOST_CTRL_PCISTATE_RW;
12281
12282         /* The NONFRM (non-frame) byte/word swap controls take effect
12283          * on descriptor entries, i.e. anything which isn't packet data.
12284          *
12285          * The StrongARM chips on the board (one for tx, one for rx)
12286          * are running in big-endian mode.
12287          */
12288         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12289                         GRC_MODE_WSWAP_NONFRM_DATA);
12290 #ifdef __BIG_ENDIAN
12291         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12292 #endif
12293         spin_lock_init(&tp->lock);
12294         spin_lock_init(&tp->indirect_lock);
12295         INIT_WORK(&tp->reset_task, tg3_reset_task);
12296
12297         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12298         if (!tp->regs) {
12299                 printk(KERN_ERR PFX "Cannot map device registers, "
12300                        "aborting.\n");
12301                 err = -ENOMEM;
12302                 goto err_out_free_dev;
12303         }
12304
12305         tg3_init_link_config(tp);
12306
12307         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12308         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12309         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12310
12311         dev->open = tg3_open;
12312         dev->stop = tg3_close;
12313         dev->get_stats = tg3_get_stats;
12314         dev->set_multicast_list = tg3_set_rx_mode;
12315         dev->set_mac_address = tg3_set_mac_addr;
12316         dev->do_ioctl = tg3_ioctl;
12317         dev->tx_timeout = tg3_tx_timeout;
12318         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12319         dev->ethtool_ops = &tg3_ethtool_ops;
12320         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12321         dev->change_mtu = tg3_change_mtu;
12322         dev->irq = pdev->irq;
12323 #ifdef CONFIG_NET_POLL_CONTROLLER
12324         dev->poll_controller = tg3_poll_controller;
12325 #endif
12326
12327         err = tg3_get_invariants(tp);
12328         if (err) {
12329                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12330                        "aborting.\n");
12331                 goto err_out_iounmap;
12332         }
12333
12334         /* The EPB bridge inside 5714, 5715, and 5780 and any
12335          * device behind the EPB cannot support DMA addresses > 40-bit.
12336          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12337          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12338          * do DMA address check in tg3_start_xmit().
12339          */
12340         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12341                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12342         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12343                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12344 #ifdef CONFIG_HIGHMEM
12345                 dma_mask = DMA_64BIT_MASK;
12346 #endif
12347         } else
12348                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12349
12350         /* Configure DMA attributes. */
12351         if (dma_mask > DMA_32BIT_MASK) {
12352                 err = pci_set_dma_mask(pdev, dma_mask);
12353                 if (!err) {
12354                         dev->features |= NETIF_F_HIGHDMA;
12355                         err = pci_set_consistent_dma_mask(pdev,
12356                                                           persist_dma_mask);
12357                         if (err < 0) {
12358                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12359                                        "DMA for consistent allocations\n");
12360                                 goto err_out_iounmap;
12361                         }
12362                 }
12363         }
12364         if (err || dma_mask == DMA_32BIT_MASK) {
12365                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12366                 if (err) {
12367                         printk(KERN_ERR PFX "No usable DMA configuration, "
12368                                "aborting.\n");
12369                         goto err_out_iounmap;
12370                 }
12371         }
12372
12373         tg3_init_bufmgr_config(tp);
12374
12375         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12376                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12377         }
12378         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12380             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12381             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12382             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12383                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12384         } else {
12385                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12386         }
12387
12388         /* TSO is on by default on chips that support hardware TSO.
12389          * Firmware TSO on older chips gives lower performance, so it
12390          * is off by default, but can be enabled using ethtool.
12391          */
12392         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12393                 dev->features |= NETIF_F_TSO;
12394                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12395                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12396                         dev->features |= NETIF_F_TSO6;
12397                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12398                         dev->features |= NETIF_F_TSO_ECN;
12399         }
12400
12401
12402         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12403             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12404             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12405                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12406                 tp->rx_pending = 63;
12407         }
12408
12409         err = tg3_get_device_address(tp);
12410         if (err) {
12411                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12412                        "aborting.\n");
12413                 goto err_out_iounmap;
12414         }
12415
12416         /*
12417          * Reset the chip in case an UNDI or EFI boot driver did not shut
12418          * down DMA.  The DMA self test enables the WDMAC, and we would
12419          * otherwise see (spurious) pending DMA on the PCI bus at that point.
12420          */
12421         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12422             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12423                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12424                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12425         }
12426
12427         err = tg3_test_dma(tp);
12428         if (err) {
12429                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12430                 goto err_out_iounmap;
12431         }
12432
12433         /* Most Tigon3 chips can checksum IPv4 only, and some have buggy
12434          * checksumming; a few newer chips also handle IPv6 checksums.
12435          */
12436         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12437                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12439                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12440                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12441                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12442                         dev->features |= NETIF_F_IPV6_CSUM;
12443
12444                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12445         } else
12446                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12447
12448         /* flow control autonegotiation is default behavior */
12449         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12450
12451         tg3_init_coal(tp);
12452
12453         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12454                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12455                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12456                                "base address for APE, aborting.\n");
12457                         err = -ENODEV;
12458                         goto err_out_iounmap;
12459                 }
12460
12461                 tg3reg_base = pci_resource_start(pdev, 2);
12462                 tg3reg_len = pci_resource_len(pdev, 2);
12463
12464                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12465                 if (!tp->aperegs) {
12466                         printk(KERN_ERR PFX "Cannot map APE registers, "
12467                                "aborting.\n");
12468                         err = -ENOMEM;
12469                         goto err_out_iounmap;
12470                 }
12471
12472                 tg3_ape_lock_init(tp);
12473         }
12474
12475         pci_set_drvdata(pdev, dev);
12476
12477         err = register_netdev(dev);
12478         if (err) {
12479                 printk(KERN_ERR PFX "Cannot register net device, "
12480                        "aborting.\n");
12481                 goto err_out_apeunmap;
12482         }
12483
12484         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12485                dev->name,
12486                tp->board_part_number,
12487                tp->pci_chip_rev_id,
12488                tg3_phy_string(tp),
12489                tg3_bus_string(tp, str),
12490                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12491                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12492                  "10/100/1000Base-T")));
12493
12494         for (i = 0; i < 6; i++)
12495                 printk("%2.2x%c", dev->dev_addr[i],
12496                        i == 5 ? '\n' : ':');
12497
12498         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12499                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12500                dev->name,
12501                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12502                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12503                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12504                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12505                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12506                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12507         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12508                dev->name, tp->dma_rwctrl,
12509                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12510                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12511
12512         return 0;
12513
12514 err_out_apeunmap:
12515         if (tp->aperegs) {
12516                 iounmap(tp->aperegs);
12517                 tp->aperegs = NULL;
12518         }
12519
12520 err_out_iounmap:
12521         if (tp->regs) {
12522                 iounmap(tp->regs);
12523                 tp->regs = NULL;
12524         }
12525
12526 err_out_free_dev:
12527         free_netdev(dev);
12528
12529 err_out_free_res:
12530         pci_release_regions(pdev);
12531
12532 err_out_disable_pdev:
12533         pci_disable_device(pdev);
12534         pci_set_drvdata(pdev, NULL);
12535         return err;
12536 }
12537
12538 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12539 {
12540         struct net_device *dev = pci_get_drvdata(pdev);
12541
12542         if (dev) {
12543                 struct tg3 *tp = netdev_priv(dev);
12544
12545                 flush_scheduled_work();
12546                 unregister_netdev(dev);
12547                 if (tp->aperegs) {
12548                         iounmap(tp->aperegs);
12549                         tp->aperegs = NULL;
12550                 }
12551                 if (tp->regs) {
12552                         iounmap(tp->regs);
12553                         tp->regs = NULL;
12554                 }
12555                 free_netdev(dev);
12556                 pci_release_regions(pdev);
12557                 pci_disable_device(pdev);
12558                 pci_set_drvdata(pdev, NULL);
12559         }
12560 }
12561
12562 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12563 {
12564         struct net_device *dev = pci_get_drvdata(pdev);
12565         struct tg3 *tp = netdev_priv(dev);
12566         int err;
12567
12568         /* PCI register 4 needs to be saved whether netif_running() or not.
12569          * MSI address and data need to be saved if using MSI and
12570          * netif_running().
12571          */
12572         pci_save_state(pdev);
12573
12574         if (!netif_running(dev))
12575                 return 0;
12576
12577         flush_scheduled_work();
12578         tg3_netif_stop(tp);
12579
12580         del_timer_sync(&tp->timer);
12581
12582         tg3_full_lock(tp, 1);
12583         tg3_disable_ints(tp);
12584         tg3_full_unlock(tp);
12585
12586         netif_device_detach(dev);
12587
12588         tg3_full_lock(tp, 0);
12589         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12590         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12591         tg3_full_unlock(tp);
12592
12593         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12594         if (err) {
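                      /* Could not enter the requested low-power state; bring
                       * the device back up so the interface keeps working.
                       */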
12595                 tg3_full_lock(tp, 0);
12596
12597                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12598                 if (tg3_restart_hw(tp, 1))
12599                         goto out;
12600
12601                 tp->timer.expires = jiffies + tp->timer_offset;
12602                 add_timer(&tp->timer);
12603
12604                 netif_device_attach(dev);
12605                 tg3_netif_start(tp);
12606
12607 out:
12608                 tg3_full_unlock(tp);
12609         }
12610
12611         return err;
12612 }
12613
12614 static int tg3_resume(struct pci_dev *pdev)
12615 {
12616         struct net_device *dev = pci_get_drvdata(pdev);
12617         struct tg3 *tp = netdev_priv(dev);
12618         int err;
12619
12620         pci_restore_state(tp->pdev);
12621
12622         if (!netif_running(dev))
12623                 return 0;
12624
12625         err = tg3_set_power_state(tp, PCI_D0);
12626         if (err)
12627                 return err;
12628
12629         /* Hardware bug - MSI won't work if INTX disabled. */
12630         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
12631             (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
12632                 pci_intx(tp->pdev, 1);
12633
12634         netif_device_attach(dev);
12635
12636         tg3_full_lock(tp, 0);
12637
12638         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12639         err = tg3_restart_hw(tp, 1);
12640         if (err)
12641                 goto out;
12642
12643         tp->timer.expires = jiffies + tp->timer_offset;
12644         add_timer(&tp->timer);
12645
12646         tg3_netif_start(tp);
12647
12648 out:
12649         tg3_full_unlock(tp);
12650
12651         return err;
12652 }
12653
12654 static struct pci_driver tg3_driver = {
12655         .name           = DRV_MODULE_NAME,
12656         .id_table       = tg3_pci_tbl,
12657         .probe          = tg3_init_one,
12658         .remove         = __devexit_p(tg3_remove_one),
12659         .suspend        = tg3_suspend,
12660         .resume         = tg3_resume
12661 };
12662
12663 static int __init tg3_init(void)
12664 {
12665         return pci_register_driver(&tg3_driver);
12666 }
12667
12668 static void __exit tg3_cleanup(void)
12669 {
12670         pci_unregister_driver(&tg3_driver);
12671 }
12672
12673 module_init(tg3_init);
12674 module_exit(tg3_cleanup);