/*
 * [TG3]: Replace some magic 5704S constants
 * Source tree: safe/jmp/linux-2.6 — drivers/net/tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
/* Enable VLAN tag handling only when the 802.1Q code is built in or
 * available as a module.
 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1
62
63 #include "tg3.h"
64
/* Driver identity strings used for logging and module versioning. */
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.86"
#define DRV_MODULE_RELDATE	"November 9, 2007"

/* Reset defaults for the MAC/RX/TX mode registers. */
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif message bitmap when the tg3_debug parameter is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
82
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the host-side descriptor rings above. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Advance a TX ring index with wrap-around; relies on the ring size
 * being a power of two.
 */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame size plus the NIC's alignment offset plus
 * 64 bytes of slack (NOTE(review): presumably alignment headroom —
 * confirm against the rx ring setup code).
 */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)		((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* number of ethtool self-tests; must match ethtool_test_keys[] below */
#define TG3_NUM_TEST		6
134
/* Banner printed once at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* debug categories; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI IDs this driver binds to: all Tigon3 variants plus SysKonnect,
 * Altima, and Apple boards built around the same silicon.  The table is
 * terminated by the empty entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ETHTOOL_GSTRINGS names for ETHTOOL_GSTATS.  The order here must match
 * the u64 layout of struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* Names reported for the ethtool self-tests; order must match the
 * TG3_NUM_TEST result slots filled in by the self-test code.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
/* Write a device register through the memory-mapped BAR.  This is a
 * posted write with no read-back flush.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* Write a register in the APE (Application Processing Engine) BAR. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a device register indirectly through PCI config space: program
 * the target offset into TG3PCI_REG_BASE_ADDR, then push the value via
 * TG3PCI_REG_DATA.  indirect_lock serializes the two-step sequence
 * against other indirect accessors.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* Write a register and immediately read it back so the posted write is
 * flushed out to the chip before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
/* Read a device register indirectly through PCI config space; the
 * address/data pair is protected by indirect_lock (see the write
 * counterpart above).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Write a mailbox register while running in indirect (config-space)
 * mode.  Two mailboxes have dedicated config-space aliases and bypass
 * the generic indirect window entirely.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* NOTE(review): 0x5600 appears to relocate the mailbox offset into
	 * the indirect register window — magic constant, candidate for a
	 * named define in tg3.h (confirm against chip documentation).
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register through the indirect (config-space) window.
 * Uses the same 0x5600 mailbox relocation offset as the write path.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: the write path already reaches the
		 * chip synchronously, so no flushing read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay before the read-back so the read
		 * itself is safe, then flush.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426 {
427         tp->write32_mbox(tp, off, val);
428         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430                 tp->read32_mbox(tp, off);
431 }
432
/* Write a TX mailbox, applying two chip workarounds: write the value
 * twice for the TXD mailbox hardware bug, and read it back when the
 * chipset may reorder mailbox writes.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
/* Shorthand register accessors; all dispatch through the per-chip
 * function pointers in struct tg3 so the same call sites work in both
 * direct and indirect access modes.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
463
/* Write a word into NIC SRAM through the memory window.  On 5906 the
 * statistics block region is not writable, so such writes are silently
 * dropped.  The window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Config-space path for chips that need it. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a word from NIC SRAM through the memory window.  On 5906 the
 * statistics block region is not readable; return 0 for it.  The window
 * base is always restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Config-space path for chips that need it. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
/* Acquire an APE hardware lock shared with the management firmware.
 * Returns 0 on success, -EINVAL for an unsupported lock number, or
 * -EBUSY if the grant does not arrive within ~1 ms.  A no-op (success)
 * when the APE is not enabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	/* Lock registers are 4 bytes apart. */
	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Mask the PCI interrupt at the host bridge and write 1 to the
 * interrupt mailbox to disable chip interrupt generation.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Kick a pending interrupt: force one via GRC local ctrl when the
 * status block already shows an un-serviced update (non-tagged mode
 * only); otherwise poke the coalescing engine to fire immediately.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: clear irq_sync first (with a write barrier
 * so the interrupt path sees it), unmask the PCI interrupt, write the
 * last seen status tag to the mailbox, and finally force an interrupt
 * check in case work arrived while interrupts were off.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI mode needs the mailbox written a second time. */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	/* Order the mailbox write before any subsequent MMIO. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: stop NAPI polling and the TX queue, refreshing
 * trans_start first so the watchdog does not fire a spurious timeout.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, then re-enable interrupts with the status block
 * marked updated so any pending work is processed immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the chip core clock back to its normal source.  The ALTCLK
 * transition sequence is order-sensitive (intermediate writes with
 * 40 usec settle times), so leave the statement order alone.  No-op on
 * CPMU-equipped and 5780-class chips, which manage clocks themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down via ALTCLK in two writes before the final value. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707 #define PHY_BUSY_LOOPS  5000
708
/* Read a PHY register over the MII management interface.
 * Temporarily suspends hardware auto-polling (restored before return),
 * issues the read command via MAC_MI_COM, and busy-waits for completion.
 * Returns 0 with *val filled in, or -EBUSY on timeout (*val stays 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to pick up the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write a PHY register over the MII management interface.
 * Mirrors tg3_readphy(): suspends auto-polling, issues the write via
 * MAC_MI_COM, busy-waits, then restores auto-polling.  Returns 0 on
 * success or -EBUSY on timeout.  On 5906, writes to MII_TG3_CTRL and
 * MII_TG3_AUX_CTRL are silently skipped (registers absent on that PHY).
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Enable or disable automatic MDI crossover in the PHY.  Only applies
 * to 5705+ copper parts; 5906 uses its EPHY shadow registers, all other
 * chips go through the AUX_CTRL misc shadow.  Read-modify-write steps
 * are skipped entirely if the initial PHY read fails.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Open the EPHY shadow register window. */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			/* Close the shadow window again. */
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
844
/* Turn on the PHY's "ethernet@wirespeed" feature unless the chip
 * has been flagged as not supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* Select aux-control shadow 0x7007, then set bits 15 and 4
	 * read-modify-write.  NOTE(review): bit meanings are vendor
	 * magic — confirm against Broadcom PHY documentation.
	 */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a known test pattern into each of the four PHY DSP channels,
 * read it back, and verify the readback matches.  Used by the
 * 5703/4/5 PHY reset workaround to confirm the DSP is sane.
 *
 * On any failure *resetp is set so the caller retries with a fresh
 * BMCR reset; a readback mismatch additionally pokes two recovery
 * values into the DSP before returning.  Returns 0 if every channel
 * verifies, -EBUSY otherwise.
 *
 * NOTE(review): register 0x16 command words (0x0002/0x0202/0x0082/
 * 0x0802) and the pattern values are vendor magic — confirm against
 * Broadcom errata before changing.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's DSP block and write the
		 * six-word test pattern.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare against
		 * the pattern (masked to the bits the DSP stores).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write recovery values,
				 * caller will retry the sequence.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
972
/* Clear the test pattern out of all four PHY DSP channels by writing
 * six zero words to each and waiting for the macro to finish.
 * Returns 0 on success, -EBUSY if any channel's macro times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
992
/* PHY reset workaround for 5703/5704/5705: reset, force 1000/full
 * master mode, then verify the DSP via test patterns, retrying up to
 * 10 times.  Afterwards the channel patterns are cleared and the
 * original register state is restored.
 *
 * NOTE(review): if every retry bails out via "continue" before
 * MII_TG3_CTRL is read, phy9_orig is used uninitialized below, and
 * err keeps its last (non-zero) value from the pattern check.  The
 * readphy failures that trigger "continue" appear to be rare in
 * practice — confirm before relying on this path.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* Pattern check sets do_phy_reset on failure so the
		 * next iteration starts from a fresh BMCR reset.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore master/slave setting saved before the workaround. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1068
/* Defined later in this file; forward-declared for tg3_phy_reset(). */
static void tg3_link_report(struct tg3 *);
1070
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* 5906: bring the EPHY out of IDDQ power-down before
	 * touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice — presumably to flush latched link-status
	 * state before the reset; confirm against PHY datasheet.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* A PHY reset drops the link; report it down immediately. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the DSP test-pattern workaround
	 * instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Take the MAC clock out of the 12.5 MHz low-power
		 * setting if it was left there.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

out:
	/* Per-chip PHY errata workarounds via DSP register writes.
	 * NOTE(review): the address/value pairs below are vendor
	 * magic — confirm against Broadcom errata before changing.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally in the original code. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1195
/* Reconfigure the GPIO-controlled auxiliary power lines when entering
 * or leaving a low-power state.  On dual-port chips (5704/5714) the
 * peer port's WOL/ASF configuration is consulted too, since both
 * ports share the aux power source.  No-op for non-NIC (e.g. LOM)
 * configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Either port needing WOL or ASF keeps aux power on. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the initialized peer port own the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Staged GPIO sequence: the three timed writes
			 * below step the aux power switch on in order.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* Let the initialized peer port own the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 to switch aux power off. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1291
1292 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1293 {
1294         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1295                 return 1;
1296         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1297                 if (speed != SPEED_10)
1298                         return 1;
1299         } else if (speed == SPEED_10)
1300                 return 1;
1301
1302         return 0;
1303 }
1304
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kind" codes passed to tg3_write_sig_post_reset() to tell
 * the firmware why the chip is being reset.
 */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

/* Forward declarations for helpers defined later in this file. */
static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);
1315
/* Put the PHY (or SerDes) into its lowest safe power state.  Several
 * chip families are excluded because powering the PHY down triggers
 * hardware bugs on them.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SerDes digital block in soft reset
			 * with HW autoneg selected while powered down.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 EPHY: reset it, then drop it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the LEDs off and set the aux-control
		 * low-power value before powering down.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Park the MAC clock at 12.5 MHz for low power. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1363
/* Transition the device into the requested PCI power state, arming
 * wake-on-LAN and adjusting clocks/PHY power as the configuration
 * dictates.  Returns 0 on success, -EINVAL for an unsupported state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Read-modify-write the PM control word: clear the state
	 * field, acknowledge any pending PME.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};	/* NOTE(review): stray ';' after switch — harmless */

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is in low power. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper links drop to 10/half autoneg to save power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200 ms for firmware to post its magic
		 * value in the ASF status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detect only if PME from D3cold
		 * is supported and WOL is requested.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating / alternate-clock selection for low power;
	 * which bits apply depends on chip generation.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes, 40 usec settle time each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Power the PHY down only if nothing (WOL, ASF, APE) still
	 * needs it alive.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1588
1589 static void tg3_link_report(struct tg3 *tp)
1590 {
1591         if (!netif_carrier_ok(tp->dev)) {
1592                 if (netif_msg_link(tp))
1593                         printk(KERN_INFO PFX "%s: Link is down.\n",
1594                                tp->dev->name);
1595         } else if (netif_msg_link(tp)) {
1596                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597                        tp->dev->name,
1598                        (tp->link_config.active_speed == SPEED_1000 ?
1599                         1000 :
1600                         (tp->link_config.active_speed == SPEED_100 ?
1601                          100 : 10)),
1602                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1603                         "full" : "half"));
1604
1605                 printk(KERN_INFO PFX
1606                        "%s: Flow control is %s for TX and %s for RX.\n",
1607                        tp->dev->name,
1608                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1609                        "on" : "off",
1610                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1611                        "on" : "off");
1612         }
1613 }
1614
1615 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1616 {
1617         u16 miireg;
1618
1619         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1620                 miireg = ADVERTISE_PAUSE_CAP;
1621         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1622                 miireg = ADVERTISE_PAUSE_ASYM;
1623         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1624                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1625         else
1626                 miireg = 0;
1627
1628         return miireg;
1629 }
1630
1631 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1632 {
1633         u16 miireg;
1634
1635         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1636                 miireg = ADVERTISE_1000XPAUSE;
1637         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1638                 miireg = ADVERTISE_1000XPSE_ASYM;
1639         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1640                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1641         else
1642                 miireg = 0;
1643
1644         return miireg;
1645 }
1646
1647 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1648 {
1649         u8 cap = 0;
1650
1651         if (lcladv & ADVERTISE_PAUSE_CAP) {
1652                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1653                         if (rmtadv & LPA_PAUSE_CAP)
1654                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1655                         else if (rmtadv & LPA_PAUSE_ASYM)
1656                                 cap = TG3_FLOW_CTRL_RX;
1657                 } else {
1658                         if (rmtadv & LPA_PAUSE_CAP)
1659                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1660                 }
1661         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1662                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1663                         cap = TG3_FLOW_CTRL_TX;
1664         }
1665
1666         return cap;
1667 }
1668
1669 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1670 {
1671         u8 cap = 0;
1672
1673         if (lcladv & ADVERTISE_1000XPAUSE) {
1674                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1675                         if (rmtadv & LPA_1000XPAUSE)
1676                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1677                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1678                                 cap = TG3_FLOW_CTRL_RX;
1679                 } else {
1680                         if (rmtadv & LPA_1000XPAUSE)
1681                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1682                 }
1683         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1684                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1685                         cap = TG3_FLOW_CTRL_TX;
1686         }
1687
1688         return cap;
1689 }
1690
/* Program the MAC's RX/TX flow-control enables.  When pause autoneg is
 * active (TG3_FLAG_PAUSE_AUTONEG), the result is resolved from the
 * local/remote advertisement registers; otherwise the administratively
 * configured link_config.flowctrl is applied directly.  The resolved
 * setting is recorded in link_config.active_flowctrl.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u8 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
		/* SERDES links use the 1000BASE-X pause encoding; copper
		 * uses the clause-28 (1000BASE-T) encoding.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
								   remote_adv);
		else
			new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
								   remote_adv);
	} else {
		/* Pause autoneg disabled: force the configured setting. */
		new_tg3_flags = tp->link_config.flowctrl;
	}

	tp->link_config.active_flowctrl = new_tg3_flags;

	if (new_tg3_flags & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware register if the mode actually changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1728
1729 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1730 {
1731         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1732         case MII_TG3_AUX_STAT_10HALF:
1733                 *speed = SPEED_10;
1734                 *duplex = DUPLEX_HALF;
1735                 break;
1736
1737         case MII_TG3_AUX_STAT_10FULL:
1738                 *speed = SPEED_10;
1739                 *duplex = DUPLEX_FULL;
1740                 break;
1741
1742         case MII_TG3_AUX_STAT_100HALF:
1743                 *speed = SPEED_100;
1744                 *duplex = DUPLEX_HALF;
1745                 break;
1746
1747         case MII_TG3_AUX_STAT_100FULL:
1748                 *speed = SPEED_100;
1749                 *duplex = DUPLEX_FULL;
1750                 break;
1751
1752         case MII_TG3_AUX_STAT_1000HALF:
1753                 *speed = SPEED_1000;
1754                 *duplex = DUPLEX_HALF;
1755                 break;
1756
1757         case MII_TG3_AUX_STAT_1000FULL:
1758                 *speed = SPEED_1000;
1759                 *duplex = DUPLEX_FULL;
1760                 break;
1761
1762         default:
1763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1764                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1765                                  SPEED_10;
1766                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1767                                   DUPLEX_HALF;
1768                         break;
1769                 }
1770                 *speed = SPEED_INVALID;
1771                 *duplex = DUPLEX_INVALID;
1772                 break;
1773         };
1774 }
1775
/* Program the copper PHY's advertisement registers from tp->link_config
 * and then start (or force) the link.  Three configuration paths:
 * low-power mode, full autonegotiation, and a forced speed/duplex.
 * NOTE(review): presumably called with the PHY freshly reset and under
 * the driver's lock — confirm against callers (not visible here).
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100 Mbps advertised when WOL needs that speed. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise everything allowed by the
		 * configured advertising mask (minus gigabit on 10/100-only
		 * devices).
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts prefer to be gigabit master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master preference as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* Non-gigabit forced mode: clear the 1000T control. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop into loopback and poll (up to ~15ms) for the
			 * link to go down before programming the new forced
			 * mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autonegotiating: kick off (or restart) autoneg. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1913
1914 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1915 {
1916         int err;
1917
1918         /* Turn off tap power management. */
1919         /* Set Extended packet length bit */
1920         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1921
1922         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1923         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1924
1925         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1926         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1927
1928         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1929         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1930
1931         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1932         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1933
1934         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1935         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1936
1937         udelay(40);
1938
1939         return err;
1940 }
1941
1942 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1943 {
1944         u32 adv_reg, all_mask = 0;
1945
1946         if (mask & ADVERTISED_10baseT_Half)
1947                 all_mask |= ADVERTISE_10HALF;
1948         if (mask & ADVERTISED_10baseT_Full)
1949                 all_mask |= ADVERTISE_10FULL;
1950         if (mask & ADVERTISED_100baseT_Half)
1951                 all_mask |= ADVERTISE_100HALF;
1952         if (mask & ADVERTISED_100baseT_Full)
1953                 all_mask |= ADVERTISE_100FULL;
1954
1955         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1956                 return 0;
1957
1958         if ((adv_reg & all_mask) != all_mask)
1959                 return 0;
1960         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1961                 u32 tg3_ctrl;
1962
1963                 all_mask = 0;
1964                 if (mask & ADVERTISED_1000baseT_Half)
1965                         all_mask |= ADVERTISE_1000HALF;
1966                 if (mask & ADVERTISED_1000baseT_Full)
1967                         all_mask |= ADVERTISE_1000FULL;
1968
1969                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1970                         return 0;
1971
1972                 if ((tg3_ctrl & all_mask) != all_mask)
1973                         return 0;
1974         }
1975         return 1;
1976 }
1977
/* Bring up / re-evaluate the copper PHY link: clear latched MAC status,
 * apply per-chip PHY workarounds, poll for link, resolve speed/duplex
 * and flow control, program MAC_MODE accordingly, and report carrier
 * changes.  Always returns 0 except on 5401 DSP init failure.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear all latched link/config status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* Read BMSR twice: link status is latched-low, so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms for link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit: retry reset + DSP init once
			 * if the link still did not come up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Select LED mode per board configuration. */
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the shadowed aux control register is
		 * set; if it was not, set it and reconfigure the link.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 tries) for link-up in BMSR. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane (non-zero, non-0x7fff) BMCR value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY agrees
			 * with the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		/* If we are not advertising what has been requested,
		 * bring the link down and reconfigure.
		 */
		if (local_adv !=
		    tg3_advert_flowctrl_1000T(tp->link_config.flowctrl)) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements and restart/force the link,
		 * then re-check link status.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: notify firmware via
	 * the mailbox after clearing latched status.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier changes to the net stack and log them. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2252
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine), which arbitrates a 1000BASE-X link by
 * exchanging config words through the MAC's RX_AUTO_NEG register.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* management/status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040	/* link partner advertised bits */
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters maintained by the state machine (units are
	 * state-machine invocations, compared against
	 * ANEG_STATE_SETTLE_TIME).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * repeated; used to detect a stable "ability match".
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* tx/rx config words, ANEG_CFG_* bits */
#define ANEG_CFG_NP             0x00000080	/* next page */
#define ANEG_CFG_ACK            0x00000040	/* acknowledge */
#define ANEG_CFG_RF2            0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1            0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2            0x00000001	/* pause bits */
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000	/* half duplex */
#define ANEG_CFG_FD             0x00002000	/* full duplex */
#define ANEG_CFG_INVAL          0x00001f06	/* bits that must be zero */

};
/* Return codes of tg3_fiber_aneg_smachine. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a config word must persist before a state transition. */
#define ANEG_STATE_SETTLE_TIME  10000
2316
/* Software 802.3z (1000BASE-X) auto-negotiation state machine for fiber
 * links when hardware autoneg is not in use.  Called once per "tick" by
 * fiber_autoneg(); all state between invocations lives in @ap.
 *
 * Returns ANEG_OK while quiescent, ANEG_TIMER_ENAB while a settle timer
 * should keep the caller ticking, ANEG_DONE when negotiation finished,
 * or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: clear all match/timing state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;         /* one tick per call */

        /* Sample the received config word and derive the ability/ack/idle
         * match flags consumed by the state transitions below.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        /* Word changed: restart the consistency counter. */
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        /* Same word seen more than once in a row. */
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config words arriving: treat the partner as idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch(ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        /* Reset per-negotiation state and restart. */
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                /* Transmit an all-zero config word to restart the partner. */
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold in restart until the settle time elapses. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                } else {
                        ret = ANEG_TIMER_ENAB;
                }
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                ap->flags &= ~(MR_TOGGLE_TX);
                /* Advertise full duplex + symmetric pause. */
                ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until a stable, non-zero config word is received. */
                if (ap->ability_match != 0 && ap->rxconfig != 0) {
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                }
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Acked word must match the stable ability word
                         * (ignoring the ACK bit itself).
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner went idle: restart negotiation. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reserved bits set in the received word => failure. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the link partner's advertised abilities into
                 * the MR_LP_ADV_* flags.
                 */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* 0x0008: presumably the toggle bit of the rx config word
                 * (it feeds MR_TOGGLE_RX) -- no named constant exists here.
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        /* Partner went idle: restart negotiation. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented: only
                                 * proceed when neither side wants it.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                ap->link_time = ap->cur_time;
                /* Stop sending config words and wait for idle. */
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        /* Declare link up purely on the settle timeout. */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        };

        return ret;
}
2564
/* Run the software fiber auto-negotiation state machine to completion.
 *
 * Puts the MAC in GMII port mode sending config words, then ticks
 * tg3_fiber_aneg_smachine() once per microsecond for up to ~195 ms.
 * On return, config-word transmission is stopped and @flags receives
 * the final MR_* flag set from the state machine.
 *
 * Returns 1 when negotiation completed (ANEG_DONE) and any of
 * MR_AN_COMPLETE / MR_LINK_OK / MR_LP_ADV_FULL_DUPLEX is set,
 * otherwise 0.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
        int res = 0;
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick;
        u32 tmp;

        /* Clear our transmitted config word. */
        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Force GMII port mode for the duration of the negotiation. */
        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
        udelay(40);

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
        udelay(40);

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;
        aninfo.cur_time = 0;
        tick = 0;
        /* Tick the state machine until done/failed or ~195 ms elapse. */
        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;

                udelay(1);
        }

        /* Stop sending config words regardless of outcome. */
        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        *flags = aninfo.flags;

        if (status == ANEG_DONE &&
            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
                             MR_LP_ADV_FULL_DUPLEX)))
                res = 1;

        return res;
}
2608
/* One-time initialization sequence for the BCM8002 SERDES PHY.
 *
 * The register numbers and values below are an opaque vendor-prescribed
 * sequence (no named constants exist for them); the inline comments
 * describe each step as far as is known.  Skipped when the device is
 * already initialized and PCS sync has been lost (nothing to do then).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        /* Busy-wait ~5 ms. */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        /* Busy-wait ~150 ms. */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2658
/* Fiber link setup using the SG-DIG hardware auto-negotiation block
 * (5704S-class devices).
 *
 * @mac_status: a snapshot of MAC_STATUS taken by the caller.
 *
 * Handles three paths: forced mode (autoneg disabled), (re)starting
 * hardware autoneg when SG_DIG_CTRL does not yet hold the expected
 * value, and completing/parallel-detecting a negotiation in progress.
 *
 * Returns 1 when the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* MAC_SERDES_CFG workaround applies to all revs except 5704 A0/A1. */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: tear down HW autoneg if it was active. */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Magic per-port serdes values; candidates
                                 * for named constants (see 5704S cleanup).
                                 */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        /* Pause capability */
        expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;

        /* Asymettric pause */
        expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* If we are in parallel-detect mode and still synced with
                 * no config words coming in, ride out the grace counter
                 * before kicking off a fresh autoneg.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse soft reset into the SG-DIG block. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg finished: derive flow control from the
                         * partner's advertised pause bits.
                         */
                        u32 local_adv, remote_adv;

                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: fall back to parallel
                                 * detection (partner may not negotiate).
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: restart the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
2796
/* Fiber link setup without the SG-DIG hardware autoneg block: either run
 * the software autoneg state machine (fiber_autoneg) or force a 1000FD
 * link when autoneg is disabled.
 *
 * @mac_status: a snapshot of MAC_STATUS taken by the caller.
 *
 * Returns 1 when the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* Without PCS sync there is nothing to negotiate. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 flags;
                int i;

                if (fiber_autoneg(tp, &flags)) {
                        /* Map the partner's MR_LP_ADV_* pause flags onto
                         * MII LPA bits for flow-control resolution.
                         */
                        u32 local_adv, remote_adv;

                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (flags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (flags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack sync/config change events until they stop arriving
                 * (up to 30 tries).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Parallel-detect fallback: synced and no config words. */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
2853
/* Top-level link setup for TBI/fiber devices.
 *
 * Dispatches to hardware (SG-DIG) or by-hand autoneg, updates the MAC
 * mode/LED registers and netif carrier state accordingly, and reports
 * link changes.  @force_reset is accepted for signature parity with the
 * other setup routines but is not used here.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Remember previous settings so we only report real changes. */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: software autoneg, carrier already up, and the MAC
         * reports a clean synced link -- just ack events and return.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Switch the MAC port into TBI (fiber) mode. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the pending link-change bit in the status block. */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack sync/config/link-state events until they stop arriving. */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        /* Briefly pulse config-word transmission to poke
                         * the link partner.
                         */
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Fiber links are always 1000 Mb/s full duplex when up. */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Propagate carrier state and report any effective change. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
2961
2962 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2963 {
2964         int current_link_up, err = 0;
2965         u32 bmsr, bmcr;
2966         u16 current_speed;
2967         u8 current_duplex;
2968
2969         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2970         tw32_f(MAC_MODE, tp->mac_mode);
2971         udelay(40);
2972
2973         tw32(MAC_EVENT, 0);
2974
2975         tw32_f(MAC_STATUS,
2976              (MAC_STATUS_SYNC_CHANGED |
2977               MAC_STATUS_CFG_CHANGED |
2978               MAC_STATUS_MI_COMPLETION |
2979               MAC_STATUS_LNKSTATE_CHANGED));
2980         udelay(40);
2981
2982         if (force_reset)
2983                 tg3_phy_reset(tp);
2984
2985         current_link_up = 0;
2986         current_speed = SPEED_INVALID;
2987         current_duplex = DUPLEX_INVALID;
2988
2989         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2990         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2992                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2993                         bmsr |= BMSR_LSTATUS;
2994                 else
2995                         bmsr &= ~BMSR_LSTATUS;
2996         }
2997
2998         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2999
3000         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3001             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3002                 /* do nothing, just check for link up at the end */
3003         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3004                 u32 adv, new_adv;
3005
3006                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3007                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3008                                   ADVERTISE_1000XPAUSE |
3009                                   ADVERTISE_1000XPSE_ASYM |
3010                                   ADVERTISE_SLCT);
3011
3012                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3013
3014                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3015                         new_adv |= ADVERTISE_1000XHALF;
3016                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3017                         new_adv |= ADVERTISE_1000XFULL;
3018
3019                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3020                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3021                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3022                         tg3_writephy(tp, MII_BMCR, bmcr);
3023
3024                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3025                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3026                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3027
3028                         return err;
3029                 }
3030         } else {
3031                 u32 new_bmcr;
3032
3033                 bmcr &= ~BMCR_SPEED1000;
3034                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3035
3036                 if (tp->link_config.duplex == DUPLEX_FULL)
3037                         new_bmcr |= BMCR_FULLDPLX;
3038
3039                 if (new_bmcr != bmcr) {
3040                         /* BMCR_SPEED1000 is a reserved bit that needs
3041                          * to be set on write.
3042                          */
3043                         new_bmcr |= BMCR_SPEED1000;
3044
3045                         /* Force a linkdown */
3046                         if (netif_carrier_ok(tp->dev)) {
3047                                 u32 adv;
3048
3049                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3050                                 adv &= ~(ADVERTISE_1000XFULL |
3051                                          ADVERTISE_1000XHALF |
3052                                          ADVERTISE_SLCT);
3053                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3054                                 tg3_writephy(tp, MII_BMCR, bmcr |
3055                                                            BMCR_ANRESTART |
3056                                                            BMCR_ANENABLE);
3057                                 udelay(10);
3058                                 netif_carrier_off(tp->dev);
3059                         }
3060                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3061                         bmcr = new_bmcr;
3062                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3063                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3064                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3065                             ASIC_REV_5714) {
3066                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3067                                         bmsr |= BMSR_LSTATUS;
3068                                 else
3069                                         bmsr &= ~BMSR_LSTATUS;
3070                         }
3071                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3072                 }
3073         }
3074
3075         if (bmsr & BMSR_LSTATUS) {
3076                 current_speed = SPEED_1000;
3077                 current_link_up = 1;
3078                 if (bmcr & BMCR_FULLDPLX)
3079                         current_duplex = DUPLEX_FULL;
3080                 else
3081                         current_duplex = DUPLEX_HALF;
3082
3083                 if (bmcr & BMCR_ANENABLE) {
3084                         u32 local_adv, remote_adv, common;
3085
3086                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3087                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3088                         common = local_adv & remote_adv;
3089                         if (common & (ADVERTISE_1000XHALF |
3090                                       ADVERTISE_1000XFULL)) {
3091                                 if (common & ADVERTISE_1000XFULL)
3092                                         current_duplex = DUPLEX_FULL;
3093                                 else
3094                                         current_duplex = DUPLEX_HALF;
3095
3096                                 tg3_setup_flow_control(tp, local_adv,
3097                                                        remote_adv);
3098                         }
3099                         else
3100                                 current_link_up = 0;
3101                 }
3102         }
3103
3104         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3105         if (tp->link_config.active_duplex == DUPLEX_HALF)
3106                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3107
3108         tw32_f(MAC_MODE, tp->mac_mode);
3109         udelay(40);
3110
3111         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3112
3113         tp->link_config.active_speed = current_speed;
3114         tp->link_config.active_duplex = current_duplex;
3115
3116         if (current_link_up != netif_carrier_ok(tp->dev)) {
3117                 if (current_link_up)
3118                         netif_carrier_on(tp->dev);
3119                 else {
3120                         netif_carrier_off(tp->dev);
3121                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3122                 }
3123                 tg3_link_report(tp);
3124         }
3125         return err;
3126 }
3127
3128 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3129 {
3130         if (tp->serdes_counter) {
3131                 /* Give autoneg time to complete. */
3132                 tp->serdes_counter--;
3133                 return;
3134         }
3135         if (!netif_carrier_ok(tp->dev) &&
3136             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3137                 u32 bmcr;
3138
3139                 tg3_readphy(tp, MII_BMCR, &bmcr);
3140                 if (bmcr & BMCR_ANENABLE) {
3141                         u32 phy1, phy2;
3142
3143                         /* Select shadow register 0x1f */
3144                         tg3_writephy(tp, 0x1c, 0x7c00);
3145                         tg3_readphy(tp, 0x1c, &phy1);
3146
3147                         /* Select expansion interrupt status register */
3148                         tg3_writephy(tp, 0x17, 0x0f01);
3149                         tg3_readphy(tp, 0x15, &phy2);
3150                         tg3_readphy(tp, 0x15, &phy2);
3151
3152                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3153                                 /* We have signal detect and not receiving
3154                                  * config code words, link is up by parallel
3155                                  * detection.
3156                                  */
3157
3158                                 bmcr &= ~BMCR_ANENABLE;
3159                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3160                                 tg3_writephy(tp, MII_BMCR, bmcr);
3161                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3162                         }
3163                 }
3164         }
3165         else if (netif_carrier_ok(tp->dev) &&
3166                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3167                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3168                 u32 phy2;
3169
3170                 /* Select expansion interrupt status register */
3171                 tg3_writephy(tp, 0x17, 0x0f01);
3172                 tg3_readphy(tp, 0x15, &phy2);
3173                 if (phy2 & 0x20) {
3174                         u32 bmcr;
3175
3176                         /* Config code words received, turn on autoneg. */
3177                         tg3_readphy(tp, MII_BMCR, &bmcr);
3178                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3179
3180                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3181
3182                 }
3183         }
3184 }
3185
3186 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3187 {
3188         int err;
3189
3190         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3191                 err = tg3_setup_fiber_phy(tp, force_reset);
3192         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3193                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3194         } else {
3195                 err = tg3_setup_copper_phy(tp, force_reset);
3196         }
3197
3198         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3199             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3200                 u32 val, scale;
3201
3202                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3203                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3204                         scale = 65;
3205                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3206                         scale = 6;
3207                 else
3208                         scale = 12;
3209
3210                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3211                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3212                 tw32(GRC_MISC_CFG, val);
3213         }
3214
3215         if (tp->link_config.active_speed == SPEED_1000 &&
3216             tp->link_config.active_duplex == DUPLEX_HALF)
3217                 tw32(MAC_TX_LENGTHS,
3218                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3219                       (6 << TX_LENGTHS_IPG_SHIFT) |
3220                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3221         else
3222                 tw32(MAC_TX_LENGTHS,
3223                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3224                       (6 << TX_LENGTHS_IPG_SHIFT) |
3225                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3226
3227         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3228                 if (netif_carrier_ok(tp->dev)) {
3229                         tw32(HOSTCC_STAT_COAL_TICKS,
3230                              tp->coal.stats_block_coalesce_usecs);
3231                 } else {
3232                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3233                 }
3234         }
3235
3236         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3237                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3238                 if (!netif_carrier_ok(tp->dev))
3239                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3240                               tp->pwrmgmt_thresh;
3241                 else
3242                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3243                 tw32(PCIE_PWR_MGMT_THRESH, val);
3244         }
3245
3246         return err;
3247 }
3248
3249 /* This is called whenever we suspect that the system chipset is re-
3250  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3251  * is bogus tx completions. We try to recover by setting the
3252  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3253  * in the workqueue.
3254  */
3255 static void tg3_tx_recover(struct tg3 *tp)
3256 {
3257         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3258                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3259
3260         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3261                "mapped I/O cycles to the network device, attempting to "
3262                "recover. Please report the problem to the driver maintainer "
3263                "and include system chipset information.\n", tp->dev->name);
3264
3265         spin_lock(&tp->lock);
3266         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3267         spin_unlock(&tp->lock);
3268 }
3269
3270 static inline u32 tg3_tx_avail(struct tg3 *tp)
3271 {
3272         smp_mb();
3273         return (tp->tx_pending -
3274                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3275 }
3276
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Consumer index as reported by the hardware status block. */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	/* Software consumer index; reap until it catches up to hw_idx. */
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means hardware and
		 * software disagree about the ring state: recover.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each page fragment occupies one more descriptor. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must lie inside
			 * the completed range; otherwise suspect reordered
			 * mailbox writes (see tg3_tx_recover).
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a concurrent
	 * tg3_start_xmit() that is about to stop the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3344
3345 /* Returns size of skb allocated or < 0 on error.
3346  *
3347  * We only need to fill in the address because the other members
3348  * of the RX descriptor are invariant, see tg3_init_rings.
3349  *
3350  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3351  * posting buffers we only dirty the first cache line of the RX
3352  * descriptor (containing the address).  Whereas for the RX status
3353  * buffers the cpu only reads the last cacheline of the RX descriptor
3354  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3355  */
3356 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3357                             int src_idx, u32 dest_idx_unmasked)
3358 {
3359         struct tg3_rx_buffer_desc *desc;
3360         struct ring_info *map, *src_map;
3361         struct sk_buff *skb;
3362         dma_addr_t mapping;
3363         int skb_size, dest_idx;
3364
3365         src_map = NULL;
3366         switch (opaque_key) {
3367         case RXD_OPAQUE_RING_STD:
3368                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3369                 desc = &tp->rx_std[dest_idx];
3370                 map = &tp->rx_std_buffers[dest_idx];
3371                 if (src_idx >= 0)
3372                         src_map = &tp->rx_std_buffers[src_idx];
3373                 skb_size = tp->rx_pkt_buf_sz;
3374                 break;
3375
3376         case RXD_OPAQUE_RING_JUMBO:
3377                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3378                 desc = &tp->rx_jumbo[dest_idx];
3379                 map = &tp->rx_jumbo_buffers[dest_idx];
3380                 if (src_idx >= 0)
3381                         src_map = &tp->rx_jumbo_buffers[src_idx];
3382                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3383                 break;
3384
3385         default:
3386                 return -EINVAL;
3387         };
3388
3389         /* Do not overwrite any of the map or rp information
3390          * until we are sure we can commit to a new buffer.
3391          *
3392          * Callers depend upon this behavior and assume that
3393          * we leave everything unchanged if we fail.
3394          */
3395         skb = netdev_alloc_skb(tp->dev, skb_size);
3396         if (skb == NULL)
3397                 return -ENOMEM;
3398
3399         skb_reserve(skb, tp->rx_offset);
3400
3401         mapping = pci_map_single(tp->pdev, skb->data,
3402                                  skb_size - tp->rx_offset,
3403                                  PCI_DMA_FROMDEVICE);
3404
3405         map->skb = skb;
3406         pci_unmap_addr_set(map, mapping, mapping);
3407
3408         if (src_map != NULL)
3409                 src_map->skb = NULL;
3410
3411         desc->addr_hi = ((u64)mapping >> 32);
3412         desc->addr_lo = ((u64)mapping & 0xffffffff);
3413
3414         return skb_size;
3415 }
3416
3417 /* We only need to move over in the address because the other
3418  * members of the RX descriptor are invariant.  See notes above
3419  * tg3_alloc_rx_skb for full details.
3420  */
3421 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3422                            int src_idx, u32 dest_idx_unmasked)
3423 {
3424         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3425         struct ring_info *src_map, *dest_map;
3426         int dest_idx;
3427
3428         switch (opaque_key) {
3429         case RXD_OPAQUE_RING_STD:
3430                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3431                 dest_desc = &tp->rx_std[dest_idx];
3432                 dest_map = &tp->rx_std_buffers[dest_idx];
3433                 src_desc = &tp->rx_std[src_idx];
3434                 src_map = &tp->rx_std_buffers[src_idx];
3435                 break;
3436
3437         case RXD_OPAQUE_RING_JUMBO:
3438                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3439                 dest_desc = &tp->rx_jumbo[dest_idx];
3440                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3441                 src_desc = &tp->rx_jumbo[src_idx];
3442                 src_map = &tp->rx_jumbo_buffers[src_idx];
3443                 break;
3444
3445         default:
3446                 return;
3447         };
3448
3449         dest_map->skb = src_map->skb;
3450         pci_unmap_addr_set(dest_map, mapping,
3451                            pci_unmap_addr(src_map, mapping));
3452         dest_desc->addr_hi = src_desc->addr_hi;
3453         dest_desc->addr_lo = src_desc->addr_lo;
3454
3455         src_map->skb = NULL;
3456 }
3457
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack through the VLAN
 * acceleration path using the group registered in tp->vlgrp.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3464
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the posting ring and
		 * the index within it that this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;	/* unknown ring: skip entry */
		}

		work_mask |= opaque_key;

		/* Errored frame (anything other than a lone odd-nibble
		 * MII error): recycle the buffer and count the drop.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large frame: pass the current buffer up the
			 * stack and post a fresh replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a new skb and recycle
			 * the original ring buffer back to the chip.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flags
		 * the frame and the computed TCP/UDP csum is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted std-ring
		 * buffers so it does not run dry during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any following MMIO. */
	mmiowb();

	return received;
}
3644
3645 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3646 {
3647         struct tg3_hw_status *sblk = tp->hw_status;
3648
3649         /* handle link change and other phy events */
3650         if (!(tp->tg3_flags &
3651               (TG3_FLAG_USE_LINKCHG_REG |
3652                TG3_FLAG_POLL_SERDES))) {
3653                 if (sblk->status & SD_STATUS_LINK_CHG) {
3654                         sblk->status = SD_STATUS_UPDATED |
3655                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3656                         spin_lock(&tp->lock);
3657                         tg3_setup_phy(tp, 0);
3658                         spin_unlock(&tp->lock);
3659                 }
3660         }
3661
3662         /* run TX completion thread */
3663         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3664                 tg3_tx(tp);
3665                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3666                         return work_done;
3667         }
3668
3669         /* run RX thread, within the bounds set by NAPI.
3670          * All RX "locking" is done by ensuring outside
3671          * code synchronizes with tg3->napi.poll()
3672          */
3673         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3674                 work_done += tg3_rx(tp, budget - work_done);
3675
3676         return work_done;
3677 }
3678
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted or no more work is pending, then re-enables interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		/* A tx ring inconsistency was detected; stop polling and
		 * let the reset task recover the chip.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		/* Nothing left to do: leave polling mode and re-enable
		 * chip interrupts.
		 */
		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3719
/* Stop the irq handlers from scheduling NAPI (they check tg3_irq_sync())
 * and wait for any handler already in flight to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting out a running handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3729
/* Non-zero while tg3_irq_quiesce() has interrupts quiesced; the irq
 * handlers use this to avoid scheduling NAPI during a quiesce.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3734
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3746
/* Counterpart to tg3_full_lock(); drops the main spinlock.
 * NOTE(review): any irq_sync state set by tg3_irq_quiesce() is
 * presumably cleared elsewhere when irqs are re-enabled -- not
 * visible in this part of the file.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3751
3752 /* One-shot MSI handler - Chip automatically disables interrupt
3753  * after sending MSI so driver doesn't have to do it.
3754  */
3755 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3756 {
3757         struct net_device *dev = dev_id;
3758         struct tg3 *tp = netdev_priv(dev);
3759
3760         prefetch(tp->hw_status);
3761         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3762
3763         if (likely(!tg3_irq_sync(tp)))
3764                 netif_rx_schedule(dev, &tp->napi);
3765
3766         return IRQ_HANDLED;
3767 }
3768
3769 /* MSI ISR - No need to check for interrupt sharing and no need to
3770  * flush status block and interrupt mailbox. PCI ordering rules
3771  * guarantee that MSI will arrive after the status block.
3772  */
3773 static irqreturn_t tg3_msi(int irq, void *dev_id)
3774 {
3775         struct net_device *dev = dev_id;
3776         struct tg3 *tp = netdev_priv(dev);
3777
3778         prefetch(tp->hw_status);
3779         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3780         /*
3781          * Writing any value to intr-mbox-0 clears PCI INTA# and
3782          * chip-internal interrupt pending events.
3783          * Writing non-zero to intr-mbox-0 additional tells the
3784          * NIC to stop sending us irqs, engaging "in-intr-handler"
3785          * event coalescing.
3786          */
3787         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3788         if (likely(!tg3_irq_sync(tp)))
3789                 netif_rx_schedule(dev, &tp->napi);
3790
3791         return IRQ_RETVAL(1);
3792 }
3793
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;	/* assume the irq is ours */

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;	/* quiesced: leave irqs masked, skip NAPI */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3842
/* Interrupt handler for chips running in tagged-status mode: the
 * hardware stamps each status block update with an incrementing tag,
 * and the driver acknowledges processed work by writing the last tag
 * it has seen back to the chip (done in tg3_poll()).  Returns
 * IRQ_HANDLED unless the interrupt clearly belongs to another device
 * sharing the line.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 *
	 * An unchanged tag means no new work is visible yet; only then
	 * do we pay for the PCI read to disambiguate.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3890
3891 /* ISR for interrupt test */
3892 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3893 {
3894         struct net_device *dev = dev_id;
3895         struct tg3 *tp = netdev_priv(dev);
3896         struct tg3_hw_status *sblk = tp->hw_status;
3897
3898         if ((sblk->status & SD_STATUS_UPDATED) ||
3899             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3900                 tg3_disable_ints(tp);
3901                 return IRQ_RETVAL(1);
3902         }
3903         return IRQ_RETVAL(0);
3904 }
3905
3906 static int tg3_init_hw(struct tg3 *, int);
3907 static int tg3_halt(struct tg3 *, int, int);
3908
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the chip is halted and the
 * interface is closed; note that tp->lock is dropped around
 * dev_close() -- presumably because closing re-enters paths that
 * take the same locks (NOTE(review): confirm) -- and re-acquired
 * before returning, so the caller's locking expectation still holds.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Put the chip back into a quiescent state before
		 * tearing the interface down.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3930
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the INTx interrupt handler directly so the
 * device can be serviced with interrupts disabled (e.g. netconsole).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3939
/* Process-context worker that fully resets and re-initializes the
 * chip; scheduled from tg3_tx_timeout() and other error paths.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_netif_stop() is called outside the lock -- NOTE(review):
	 * presumably because it synchronizes with NAPI polling, which
	 * takes the same locks; confirm.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the one-shot "restart the timer" request. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX mailbox recovery: switch to flushed mailbox writes
		 * from here on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;	/* re-init failed: leave the interface stopped */

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3980
3981 static void tg3_dump_short_state(struct tg3 *tp)
3982 {
3983         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3984                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3985         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3986                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3987 }
3988
3989 static void tg3_tx_timeout(struct net_device *dev)
3990 {
3991         struct tg3 *tp = netdev_priv(dev);
3992
3993         if (netif_msg_tx_err(tp)) {
3994                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3995                        dev->name);
3996                 tg3_dump_short_state(tp);
3997         }
3998
3999         schedule_work(&tp->reset_task);
4000 }
4001
4002 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4003 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4004 {
4005         u32 base = (u32) mapping & 0xffffffff;
4006
4007         return ((base > 0xffffdcc0) &&
4008                 (base + len + 8 < base));
4009 }
4010
4011 /* Test for DMA addresses > 40-bit */
4012 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4013                                           int len)
4014 {
4015 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4016         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4017                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4018         return 0;
4019 #else
4020         return 0;
4021 #endif
4022 }
4023
4024 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4025
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the offending skb into a freshly allocated copy mapped as
 * a single buffer, replace the descriptors already queued starting at
 * *start, and unmap/release the original skb's DMA mappings.  On
 * success *start is advanced past the replacement descriptor.
 * Returns 0 on success, -1 if allocation fails or the copy itself
 * crosses a 4G boundary; in either failure case the ring entries are
 * still cleaned and the packet is dropped.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 held the linear head; later slots held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot now owns the replacement skb (NULL
			 * on failure) and its mapping.
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
4083
4084 static void tg3_set_txd(struct tg3 *tp, int entry,
4085                         dma_addr_t mapping, int len, u32 flags,
4086                         u32 mss_and_is_end)
4087 {
4088         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4089         int is_end = (mss_and_is_end & 0x1);
4090         u32 mss = (mss_and_is_end >> 1);
4091         u32 vlan_tag = 0;
4092
4093         if (is_end)
4094                 flags |= TXD_FLAG_END;
4095         if (flags & TXD_FLAG_VLAN) {
4096                 vlan_tag = flags >> 16;
4097                 flags &= 0xffff;
4098         }
4099         vlan_tag |= (mss << TXD_MSS_SHIFT);
4100
4101         txd->addr_hi = ((u64) mapping >> 32);
4102         txd->addr_lo = ((u64) mapping & 0xffffffff);
4103         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4104         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4105 }
4106
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and all page fragments, writes one descriptor
 * per buffer, then kicks the TX producer mailbox.  Returns
 * NETDEV_TX_OK (also on TSO header-expansion failure, where the skb
 * is dropped) or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* Headers must be writable for the TSO fixups below. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Fold the header length into the bits above the MSS
		 * (hardware-defined encoding).
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed tot_len with the per-segment length and
			 * zero the IP checksum for regeneration.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Zeroed; regenerated per segment by the TSO engine
		 * (this path is hardware-TSO only, per the comment
		 * above).
		 */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop the queue when a maximally fragmented packet no
		 * longer fits, then re-check to close the race with the
		 * completion path waking the queue.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4225
4226 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4227
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software via skb_gso_segment() and transmits
 * the resulting packets individually through
 * tg3_start_xmit_dma_bug().  Consumes @skb except when returning
 * NETDEV_TX_BUSY.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		/* Space freed up between the two checks; resume. */
		netif_wake_queue(tp->dev);
	}

	/* Have the stack segment the skb with TSO masked off. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4260
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit(), but checks every mapped buffer against the
 * 4G-crossing and >40-bit DMA errata; if any buffer trips them the
 * whole packet is re-queued through tigon3_dma_hwbug_workaround().
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers must be writable for the TSO fixups below. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers longer than 80 bytes trip a TSO erratum on
		 * affected chips; fall back to software GSO there.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO regenerates the TCP checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants a pseudo-header checksum
			 * seed in the TCP header.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* IP/TCP option lengths are encoded where this chip
		 * variant expects them: mss bits vs. base_flags bits.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Remember whether any buffer hits the 4G or
			 * 40-bit errata; fixed up after the loop.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop the queue when a maximally fragmented packet no
		 * longer fits, then re-check to close the race with the
		 * completion path waking the queue.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4434
4435 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4436                                int new_mtu)
4437 {
4438         dev->mtu = new_mtu;
4439
4440         if (new_mtu > ETH_DATA_LEN) {
4441                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4442                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4443                         ethtool_op_set_tso(dev, 0);
4444                 }
4445                 else
4446                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4447         } else {
4448                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4449                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4450                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4451         }
4452 }
4453
/* net_device change_mtu hook.  Validates the requested MTU and, when
 * the interface is running, performs a full halt/restart so the RX
 * rings are rebuilt for the new buffer size.  Returns 0 or a negative
 * errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* On restart failure tg3_restart_hw() has already closed the
	 * device; do not restart the interface in that case.
	 */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4487
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Release every posted standard-ring RX buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Release every posted jumbo-ring RX buffer. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Walk the TX ring: each queued skb occupies one slot for its
	 * linear head plus one slot per page fragment.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4559
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when not even the first RX buffer of
 * a required ring could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips carry jumbo-sized buffers in the standard
	 * ring when the MTU exceeds the standard Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* NOTE(review): the 64 bytes subtracted from the buffer
		 * length appear to be reserved tailroom; confirm against
		 * tg3_alloc_rx_skb().
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  On partial
	 * allocation failure the ring is shrunk; only a total failure
	 * (not even one buffer) is fatal.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4649
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* The SKB bookkeeping arrays were allocated as one kzalloc()
	 * block anchored at rx_std_buffers (rx_jumbo_buffers and
	 * tx_buffers point into it -- see tg3_alloc_consistent()),
	 * so a single kfree() releases all three.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	/* Each DMA-coherent region is freed individually and its
	 * pointer cleared, making a repeated call harmless.
	 */
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4689
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 *
 * Allocates the host-side SKB bookkeeping arrays plus every
 * DMA-coherent ring and status/stats block the chip needs.
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One kzalloc() covers the std RX, jumbo RX and TX ring_info
	 * arrays; the two extra pointers below are carved out of it.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent() does not zero; clear the blocks the
	 * chip writes asynchronously before they are handed over.
	 */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4751
4752 #define MAX_WAIT_CNT 1000
4753
4754 /* To stop a block, clear the enable bit and poll till it
4755  * clears.  tp->lock is held.
4756  */
4757 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4758 {
4759         unsigned int i;
4760         u32 val;
4761
4762         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4763                 switch (ofs) {
4764                 case RCVLSC_MODE:
4765                 case DMAC_MODE:
4766                 case MBFREE_MODE:
4767                 case BUFMGR_MODE:
4768                 case MEMARB_MODE:
4769                         /* We can't enable/disable these bits of the
4770                          * 5705/5750, just say success.
4771                          */
4772                         return 0;
4773
4774                 default:
4775                         break;
4776                 };
4777         }
4778
4779         val = tr32(ofs);
4780         val &= ~enable_bit;
4781         tw32_f(ofs, val);
4782
4783         for (i = 0; i < MAX_WAIT_CNT; i++) {
4784                 udelay(100);
4785                 val = tr32(ofs);
4786                 if ((val & enable_bit) == 0)
4787                         break;
4788         }
4789
4790         if (i == MAX_WAIT_CNT && !silent) {
4791                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4792                        "ofs=%lx enable_bit=%x\n",
4793                        ofs, enable_bit);
4794                 return -ENODEV;
4795         }
4796
4797         return 0;
4798 }
4799
/* tp->lock is held.
 *
 * Quiesce the whole chip: disable interrupts, stop every RX and TX
 * engine block in dependency order, reset the FTQs and clear the
 * host status/stats blocks.  Individual stop failures are OR-ed into
 * the return value rather than aborting the sequence.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side blocks first (BD initiator through
	 * completion), then the send-side blocks and DMA engines.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC TX has no tg3_stop_block() helper: clear the enable bit
	 * and poll it by hand with the same MAX_WAIT_CNT * 100us budget.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register: all-ones then all-zeroes. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* The chip is stopped; scrub the blocks it was DMA-ing into. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4862
4863 /* tp->lock is held. */
4864 static int tg3_nvram_lock(struct tg3 *tp)
4865 {
4866         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4867                 int i;
4868
4869                 if (tp->nvram_lock_cnt == 0) {
4870                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4871                         for (i = 0; i < 8000; i++) {
4872                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4873                                         break;
4874                                 udelay(20);
4875                         }
4876                         if (i == 8000) {
4877                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4878                                 return -ENODEV;
4879                         }
4880                 }
4881                 tp->nvram_lock_cnt++;
4882         }
4883         return 0;
4884 }
4885
4886 /* tp->lock is held. */
4887 static void tg3_nvram_unlock(struct tg3 *tp)
4888 {
4889         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4890                 if (tp->nvram_lock_cnt > 0)
4891                         tp->nvram_lock_cnt--;
4892                 if (tp->nvram_lock_cnt == 0)
4893                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4894         }
4895 }
4896
4897 /* tp->lock is held. */
4898 static void tg3_enable_nvram_access(struct tg3 *tp)
4899 {
4900         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4901             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4902                 u32 nvaccess = tr32(NVRAM_ACCESS);
4903
4904                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4905         }
4906 }
4907
4908 /* tp->lock is held. */
4909 static void tg3_disable_nvram_access(struct tg3 *tp)
4910 {
4911         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4912             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4913                 u32 nvaccess = tr32(NVRAM_ACCESS);
4914
4915                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4916         }
4917 }
4918
/* Post an event to the APE management firmware and ring its doorbell.
 * Silently returns if the APE has not published its signature or is
 * not ready, or if the previous event is never consumed.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless the APE firmware is present and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Post our event only once the previous one has been
		 * consumed (EVENT_PENDING clear), under the MEM lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if the event was actually posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4954
4955 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4956 {
4957         u32 event;
4958         u32 apedata;
4959
4960         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4961                 return;
4962
4963         switch (kind) {
4964                 case RESET_KIND_INIT:
4965                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4966                                         APE_HOST_SEG_SIG_MAGIC);
4967                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4968                                         APE_HOST_SEG_LEN_MAGIC);
4969                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4970                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4971                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4972                                         APE_HOST_DRIVER_ID_MAGIC);
4973                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4974                                         APE_HOST_BEHAV_NO_PHYLOCK);
4975
4976                         event = APE_EVENT_STATUS_STATE_START;
4977                         break;
4978                 case RESET_KIND_SHUTDOWN:
4979                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4980                         break;
4981                 case RESET_KIND_SUSPEND:
4982                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4983                         break;
4984                 default:
4985                         return;
4986         }
4987
4988         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4989
4990         tg3_ape_send_event(tp, event);
4991 }
4992
4993 /* tp->lock is held. */
4994 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4995 {
4996         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4997                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4998
4999         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5000                 switch (kind) {
5001                 case RESET_KIND_INIT:
5002                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5003                                       DRV_STATE_START);
5004                         break;
5005
5006                 case RESET_KIND_SHUTDOWN:
5007                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5008                                       DRV_STATE_UNLOAD);
5009                         break;
5010
5011                 case RESET_KIND_SUSPEND:
5012                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5013                                       DRV_STATE_SUSPEND);
5014                         break;
5015
5016                 default:
5017                         break;
5018                 };
5019         }
5020
5021         if (kind == RESET_KIND_INIT ||
5022             kind == RESET_KIND_SUSPEND)
5023                 tg3_ape_driver_state_change(tp, kind);
5024 }
5025
5026 /* tp->lock is held. */
5027 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5028 {
5029         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5030                 switch (kind) {
5031                 case RESET_KIND_INIT:
5032                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5033                                       DRV_STATE_START_DONE);
5034                         break;
5035
5036                 case RESET_KIND_SHUTDOWN:
5037                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5038                                       DRV_STATE_UNLOAD_DONE);
5039                         break;
5040
5041                 default:
5042                         break;
5043                 };
5044         }
5045
5046         if (kind == RESET_KIND_SHUTDOWN)
5047                 tg3_ape_driver_state_change(tp, kind);
5048 }
5049
5050 /* tp->lock is held. */
5051 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5052 {
5053         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5054                 switch (kind) {
5055                 case RESET_KIND_INIT:
5056                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5057                                       DRV_STATE_START);
5058                         break;
5059
5060                 case RESET_KIND_SHUTDOWN:
5061                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5062                                       DRV_STATE_UNLOAD);
5063                         break;
5064
5065                 case RESET_KIND_SUSPEND:
5066                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5067                                       DRV_STATE_SUSPEND);
5068                         break;
5069
5070                 default:
5071                         break;
5072                 };
5073         }
5074 }
5075
/* Poll until the bootcode firmware finishes its post-reset work.
 * Returns 0 on success or when no firmware appears to be fitted;
 * -ENODEV only when the 5906 VCPU never signals init done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: the firmware
	 * writes back the one's complement of the mailbox magic
	 * (polled for up to 100000 * 10us = ~1s).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5114
/* Save PCI command register before chip reset; the core-clock reset
 * can clear its memory-enable bit (see tg3_chip_reset()), and
 * tg3_restore_pci_state() writes it back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5120
/* Restore PCI state after chip reset: indirect access enables, retry
 * behavior, the saved PCI_COMMAND word, bus-specific settings and
 * (on 5780-class parts) the MSI enable bit the reset clobbers.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get their read request size reset; conventional
	 * PCI parts get cache line size and latency timer restored.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5182
5183 static void tg3_stop_fw(struct tg3 *);
5184
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip and bring it back
 * to a state where register access and firmware are usable again.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe-related offsets/bits carried over from vendor
		 * code -- confirm against Broadcom errata before change.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Flag a driver reset to the 5906 VCPU and let it run. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 / bit 15 is an
			 * undocumented PCIe workaround -- verify intent.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Register access is usable again; let the irq handler in. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-establish the MAC port mode for the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for the bootcode to finish before touching SRAM. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5373
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause itself before a reset.  Skipped when
 * the APE manages the NIC instead of legacy ASF.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* NOTE(review): bit 14 is presumably the driver-event
		 * doorbell in GRC_RX_CPU_EVENT -- confirm against the
		 * register definitions in tg3.h.
		 */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5395
/* tp->lock is held.
 *
 * Full shutdown/reset path: pause firmware, write pre-reset
 * signatures, quiesce the hardware, reset the chip, then write the
 * legacy and post-reset signatures.  Returns the chip-reset result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Signatures are written even when the reset failed; only the
	 * reset error is propagated to the caller.
	 */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5416
/* Release version and segment addresses/lengths of the firmware image
 * in tg3FwText below (text, rodata, data, sbss, bss).
 * NOTE(review): TG3_FW_RELASE_MINOR is a long-standing misspelling of
 * "RELEASE"; the identifier is kept unchanged since it may be
 * referenced by name elsewhere.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5431
/* .text section of the 5701 A0 fixup firmware, one 32-bit word per
 * entry (appears to be MIPS machine code — see the copyright notice
 * at the top of this file).  Loaded into RX CPU scratch memory by
 * tg3_load_5701_a0_firmware_fix().  Generated data: do not edit.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5525
/* .rodata section of the 5701 A0 fixup firmware.  The words decode to
 * ASCII strings (e.g. 0x35373031 = "5701"), presumably event/error
 * names used by the firmware.  Generated data: do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};

/* .data section is all zeros; the loader passes a NULL data pointer
 * instead and zero-fills, so the table is compiled out. */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5540
/* On-chip scratch memory regions used to hold firmware for the RX and
 * TX CPUs: 16KB (0x4000 bytes) each. */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5545
5546 /* tp->lock is held. */
5547 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5548 {
5549         int i;
5550
5551         BUG_ON(offset == TX_CPU_BASE &&
5552             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5553
5554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5555                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5556
5557                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5558                 return 0;
5559         }
5560         if (offset == RX_CPU_BASE) {
5561                 for (i = 0; i < 10000; i++) {
5562                         tw32(offset + CPU_STATE, 0xffffffff);
5563                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5564                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5565                                 break;
5566                 }
5567
5568                 tw32(offset + CPU_STATE, 0xffffffff);
5569                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5570                 udelay(10);
5571         } else {
5572                 for (i = 0; i < 10000; i++) {
5573                         tw32(offset + CPU_STATE, 0xffffffff);
5574                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5575                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5576                                 break;
5577                 }
5578         }
5579
5580         if (i >= 10000) {
5581                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5582                        "and %s CPU\n",
5583                        tp->dev->name,
5584                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5585                 return -ENODEV;
5586         }
5587
5588         /* Clear firmware's nvram arbitration. */
5589         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5590                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5591         return 0;
5592 }
5593
/* Describes one firmware image to be loaded into CPU scratch memory
 * by tg3_load_firmware_cpu().  A NULL section data pointer means the
 * section is zero-filled instead of copied. */
struct fw_info {
	unsigned int text_base;		/* .text load address (fw view) */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL */
};
5605
5606 /* tp->lock is held. */
5607 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5608                                  int cpu_scratch_size, struct fw_info *info)
5609 {
5610         int err, lock_err, i;
5611         void (*write_op)(struct tg3 *, u32, u32);
5612
5613         if (cpu_base == TX_CPU_BASE &&
5614             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5615                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5616                        "TX cpu firmware on %s which is 5705.\n",
5617                        tp->dev->name);
5618                 return -EINVAL;
5619         }
5620
5621         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5622                 write_op = tg3_write_mem;
5623         else
5624                 write_op = tg3_write_indirect_reg32;
5625
5626         /* It is possible that bootcode is still loading at this point.
5627          * Get the nvram lock first before halting the cpu.
5628          */
5629         lock_err = tg3_nvram_lock(tp);
5630         err = tg3_halt_cpu(tp, cpu_base);
5631         if (!lock_err)
5632                 tg3_nvram_unlock(tp);
5633         if (err)
5634                 goto out;
5635
5636         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5637                 write_op(tp, cpu_scratch_base + i, 0);
5638         tw32(cpu_base + CPU_STATE, 0xffffffff);
5639         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5640         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5641                 write_op(tp, (cpu_scratch_base +
5642                               (info->text_base & 0xffff) +
5643                               (i * sizeof(u32))),
5644                          (info->text_data ?
5645                           info->text_data[i] : 0));
5646         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5647                 write_op(tp, (cpu_scratch_base +
5648                               (info->rodata_base & 0xffff) +
5649                               (i * sizeof(u32))),
5650                          (info->rodata_data ?
5651                           info->rodata_data[i] : 0));
5652         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5653                 write_op(tp, (cpu_scratch_base +
5654                               (info->data_base & 0xffff) +
5655                               (i * sizeof(u32))),
5656                          (info->data_data ?
5657                           info->data_data[i] : 0));
5658
5659         err = 0;
5660
5661 out:
5662         return err;
5663 }
5664
5665 /* tp->lock is held. */
5666 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5667 {
5668         struct fw_info info;
5669         int err, i;
5670
5671         info.text_base = TG3_FW_TEXT_ADDR;
5672         info.text_len = TG3_FW_TEXT_LEN;
5673         info.text_data = &tg3FwText[0];
5674         info.rodata_base = TG3_FW_RODATA_ADDR;
5675         info.rodata_len = TG3_FW_RODATA_LEN;
5676         info.rodata_data = &tg3FwRodata[0];
5677         info.data_base = TG3_FW_DATA_ADDR;
5678         info.data_len = TG3_FW_DATA_LEN;
5679         info.data_data = NULL;
5680
5681         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5682                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5683                                     &info);
5684         if (err)
5685                 return err;
5686
5687         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5688                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5689                                     &info);
5690         if (err)
5691                 return err;
5692
5693         /* Now startup only the RX cpu. */
5694         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5695         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5696
5697         for (i = 0; i < 5; i++) {
5698                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5699                         break;
5700                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5701                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5702                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5703                 udelay(1000);
5704         }
5705         if (i >= 5) {
5706                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5707                        "to set RX CPU PC, is %08x should be %08x\n",
5708                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5709                        TG3_FW_TEXT_ADDR);
5710                 return -ENODEV;
5711         }
5712         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5713         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5714
5715         return 0;
5716 }
5717
5718
/* Layout of the built-in TSO firmware image below: release id and the
 * load address/length of each section, analogous to the TG3_FW_*
 * constants above.
 *
 * NOTE(review): "RELASE" below looks like a typo for "RELEASE";
 * renaming would require updating any users, so it is left as-is.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5733
5734 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5735         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5736         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5737         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5738         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5739         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5740         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5741         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5742         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5743         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5744         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5745         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5746         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5747         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5748         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5749         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5750         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5751         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5752         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5753         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5754         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5755         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5756         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5757         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5758         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5759         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5760         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5761         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5762         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5763         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5764         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5765         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5766         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5767         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5768         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5769         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5770         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5771         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5772         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5773         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5774         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5775         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5776         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5777         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5778         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5779         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5780         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5781         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5782         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5783         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5784         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5785         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5786         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5787         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5788         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5789         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5790         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5791         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5792         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5793         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5794         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5795         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5796         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5797         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5798         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5799         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5800         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5801         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5802         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5803         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5804         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5805         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5806         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5807         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5808         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5809         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5810         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5811         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5812         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5813         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5814         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5815         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5816         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5817         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5818         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5819         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5820         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5821         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5822         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5823         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5824         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5825         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5826         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5827         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5828         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5829         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5830         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5831         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5832         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5833         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5834         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5835         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5836         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5837         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5838         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5839         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5840         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5841         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5842         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5843         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5844         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5845         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5846         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5847         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5848         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5849         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5850         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5851         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5852         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5853         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5854         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5855         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5856         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5857         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5858         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5859         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5860         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5861         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5862         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5863         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5864         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5865         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5866         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5867         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5868         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5869         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5870         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5871         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5872         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5873         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5874         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5875         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5876         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5877         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5878         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5879         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5880         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5881         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5882         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5883         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5884         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5885         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5886         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5887         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5888         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5889         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5890         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5891         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5892         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5893         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5894         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5895         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5896         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5897         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5898         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5899         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5900         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5901         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5902         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5903         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5904         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5905         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5906         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5907         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5908         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5909         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5910         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5911         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5912         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5913         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5914         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5915         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5916         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5917         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5918         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5919         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5920         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5921         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5922         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5923         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5924         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5925         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5926         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5927         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5928         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5929         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5930         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5931         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5932         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5933         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5934         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5935         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5936         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5937         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5938         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5939         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5940         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5941         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5942         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5943         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5944         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5945         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5946         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5947         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5948         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5949         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5950         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5951         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5952         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5953         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5954         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5955         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5956         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5957         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5958         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5959         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5960         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5961         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5962         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5963         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5964         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5965         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5966         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5967         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5968         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5969         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5970         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5971         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5972         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5973         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5974         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5975         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5976         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5977         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5978         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5979         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5980         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5981         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5982         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5983         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5984         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5985         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5986         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5987         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5988         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5989         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5990         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5991         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5992         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5993         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5994         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5995         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5996         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5997         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5998         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5999         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6000         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6001         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6002         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6003         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6004         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6005         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6006         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6007         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6008         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6009         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6010         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6011         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6012         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6013         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6014         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6015         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6016         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6017         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6018         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6019 };
6020
/* Read-only data segment of the standard TSO firmware image.
 * The words are ASCII message strings used by the firmware itself
 * (e.g. "MainCpuB", "MainCpuA", "stkoffld...", "fatalErr") — they must
 * be copied to the NIC verbatim; do not edit.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6028
/* Initialized data segment of the standard TSO firmware image.
 * Contains the firmware's version string ("stkoffld_v1.6.0") followed by
 * zero-initialized words; copied to the NIC verbatim — do not edit.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6034
/* 5705 needs a special version of the TSO firmware.  */
/*
 * Memory layout of the 5705 TSO firmware image inside NIC SRAM.
 * The segments are contiguous: text at 0x10000, then .rodata, .data,
 * .sbss and .bss; the *_LEN values are byte sizes and must match the
 * sizes of the tg3Tso5Fw* arrays below.
 *
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is a long-standing
 * typo; left as-is because the macro may be referenced elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
6050
/* Text (code) segment of the 5705-specific TSO firmware: raw MIPS machine
 * code, loaded to TG3_TSO5_FW_TEXT_ADDR on the RX CPU by
 * tg3_load_tso_firmware().  Opaque binary image — do not edit.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6209
/* Read-only data segment of the 5705-specific TSO firmware: ASCII message
 * strings used by the firmware ("MainCpuB", "MainCpuA", "stkoffld",
 * "fatalErr").  Copied to the NIC verbatim — do not edit.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6216
/* Initialized data segment of the 5705-specific TSO firmware: the version
 * string ("stkoffld_v1.2.0") plus zero padding.  Copied to the NIC
 * verbatim — do not edit.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6221
6222 /* tp->lock is held. */
6223 static int tg3_load_tso_firmware(struct tg3 *tp)
6224 {
6225         struct fw_info info;
6226         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6227         int err, i;
6228
6229         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6230                 return 0;
6231
6232         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6233                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6234                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6235                 info.text_data = &tg3Tso5FwText[0];
6236                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6237                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6238                 info.rodata_data = &tg3Tso5FwRodata[0];
6239                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6240                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6241                 info.data_data = &tg3Tso5FwData[0];
6242                 cpu_base = RX_CPU_BASE;
6243                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6244                 cpu_scratch_size = (info.text_len +
6245                                     info.rodata_len +
6246                                     info.data_len +
6247                                     TG3_TSO5_FW_SBSS_LEN +
6248                                     TG3_TSO5_FW_BSS_LEN);
6249         } else {
6250                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6251                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6252                 info.text_data = &tg3TsoFwText[0];
6253                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6254                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6255                 info.rodata_data = &tg3TsoFwRodata[0];
6256                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6257                 info.data_len = TG3_TSO_FW_DATA_LEN;
6258                 info.data_data = &tg3TsoFwData[0];
6259                 cpu_base = TX_CPU_BASE;
6260                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6261                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6262         }
6263
6264         err = tg3_load_firmware_cpu(tp, cpu_base,
6265                                     cpu_scratch_base, cpu_scratch_size,
6266                                     &info);
6267         if (err)
6268                 return err;
6269
6270         /* Now startup the cpu. */
6271         tw32(cpu_base + CPU_STATE, 0xffffffff);
6272         tw32_f(cpu_base + CPU_PC,    info.text_base);
6273
6274         for (i = 0; i < 5; i++) {
6275                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6276                         break;
6277                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6278                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6279                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6280                 udelay(1000);
6281         }
6282         if (i >= 5) {
6283                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6284                        "to set CPU PC, is %08x should be %08x\n",
6285                        tp->dev->name, tr32(cpu_base + CPU_PC),
6286                        info.text_base);
6287                 return -ENODEV;
6288         }
6289         tw32(cpu_base + CPU_STATE, 0xffffffff);
6290         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6291         return 0;
6292 }
6293
6294
6295 /* tp->lock is held. */
6296 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6297 {
6298         u32 addr_high, addr_low;
6299         int i;
6300
6301         addr_high = ((tp->dev->dev_addr[0] << 8) |
6302                      tp->dev->dev_addr[1]);
6303         addr_low = ((tp->dev->dev_addr[2] << 24) |
6304                     (tp->dev->dev_addr[3] << 16) |
6305                     (tp->dev->dev_addr[4] <<  8) |
6306                     (tp->dev->dev_addr[5] <<  0));
6307         for (i = 0; i < 4; i++) {
6308                 if (i == 1 && skip_mac_1)
6309                         continue;
6310                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6311                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6312         }
6313
6314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6315             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6316                 for (i = 0; i < 12; i++) {
6317                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6318                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6319                 }
6320         }
6321
6322         addr_high = (tp->dev->dev_addr[0] +
6323                      tp->dev->dev_addr[1] +
6324                      tp->dev->dev_addr[2] +
6325                      tp->dev->dev_addr[3] +
6326                      tp->dev->dev_addr[4] +
6327                      tp->dev->dev_addr[5]) &
6328                 TX_BACKOFF_SEED_MASK;
6329         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6330 }
6331
6332 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6333 {
6334         struct tg3 *tp = netdev_priv(dev);
6335         struct sockaddr *addr = p;
6336         int err = 0, skip_mac_1 = 0;
6337
6338         if (!is_valid_ether_addr(addr->sa_data))
6339                 return -EINVAL;
6340
6341         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6342
6343         if (!netif_running(dev))
6344                 return 0;
6345
6346         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6347                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6348
6349                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6350                 addr0_low = tr32(MAC_ADDR_0_LOW);
6351                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6352                 addr1_low = tr32(MAC_ADDR_1_LOW);
6353
6354                 /* Skip MAC addr 1 if ASF is using it. */
6355                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6356                     !(addr1_high == 0 && addr1_low == 0))
6357                         skip_mac_1 = 1;
6358         }
6359         spin_lock_bh(&tp->lock);
6360         __tg3_set_mac_addr(tp, skip_mac_1);
6361         spin_unlock_bh(&tp->lock);
6362
6363         return err;
6364 }
6365
6366 /* tp->lock is held. */
6367 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6368                            dma_addr_t mapping, u32 maxlen_flags,
6369                            u32 nic_addr)
6370 {
6371         tg3_write_mem(tp,
6372                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6373                       ((u64) mapping >> 32));
6374         tg3_write_mem(tp,
6375                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6376                       ((u64) mapping & 0xffffffff));
6377         tg3_write_mem(tp,
6378                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6379                        maxlen_flags);
6380
6381         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6382                 tg3_write_mem(tp,
6383                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6384                               nic_addr);
6385 }
6386
6387 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from the ethtool coalesce
 * parameters in @ec.  Caller holds tp->lock.
 *
 * NOTE(review): the TG3_FLG2_5705_PLUS flag is tested twice on purpose —
 * the register writes are interleaved so they stay in ascending register
 * order; do not merge the two conditionals.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
	/* The per-interrupt tick registers exist only on pre-5705 chips. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
	}
	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		/* No point coalescing statistics DMA while the link is
		 * down; force immediate updates.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
6409
6410 /* tp->lock is held. */
6411 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6412 {
6413         u32 val, rdmac_mode;
6414         int i, err, limit;
6415
6416         tg3_disable_ints(tp);
6417
6418         tg3_stop_fw(tp);
6419
6420         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6421
6422         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6423                 tg3_abort_hw(tp, 1);
6424         }
6425
6426         if (reset_phy)
6427                 tg3_phy_reset(tp);
6428
6429         err = tg3_chip_reset(tp);
6430         if (err)
6431                 return err;
6432
6433         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6434
6435         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6436             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6437                 val = tr32(TG3_CPMU_CTRL);
6438                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6439                 tw32(TG3_CPMU_CTRL, val);
6440
6441                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6442                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6443                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6444                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6445
6446                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6447                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6448                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6449                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6450
6451                 val = tr32(TG3_CPMU_HST_ACC);
6452                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6453                 val |= CPMU_HST_ACC_MACCLK_6_25;
6454                 tw32(TG3_CPMU_HST_ACC, val);
6455         }
6456
6457         /* This works around an issue with Athlon chipsets on
6458          * B3 tigon3 silicon.  This bit has no effect on any
6459          * other revision.  But do not set this on PCI Express
6460          * chips and don't even touch the clocks if the CPMU is present.
6461          */
6462         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6463                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6464                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6465                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6466         }
6467
6468         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6469             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6470                 val = tr32(TG3PCI_PCISTATE);
6471                 val |= PCISTATE_RETRY_SAME_DMA;
6472                 tw32(TG3PCI_PCISTATE, val);
6473         }
6474
6475         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6476                 /* Allow reads and writes to the
6477                  * APE register and memory space.
6478                  */
6479                 val = tr32(TG3PCI_PCISTATE);
6480                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6481                        PCISTATE_ALLOW_APE_SHMEM_WR;
6482                 tw32(TG3PCI_PCISTATE, val);
6483         }
6484
6485         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6486                 /* Enable some hw fixes.  */
6487                 val = tr32(TG3PCI_MSI_DATA);
6488                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6489                 tw32(TG3PCI_MSI_DATA, val);
6490         }
6491
6492         /* Descriptor ring init may make accesses to the
6493          * NIC SRAM area to setup the TX descriptors, so we
6494          * can only do this after the hardware has been
6495          * successfully reset.
6496          */
6497         err = tg3_init_rings(tp);
6498         if (err)
6499                 return err;
6500
6501         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6502             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6503                 /* This value is determined during the probe time DMA
6504                  * engine test, tg3_test_dma.
6505                  */
6506                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6507         }
6508
6509         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6510                           GRC_MODE_4X_NIC_SEND_RINGS |
6511                           GRC_MODE_NO_TX_PHDR_CSUM |
6512                           GRC_MODE_NO_RX_PHDR_CSUM);
6513         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6514
6515         /* Pseudo-header checksum is done by hardware logic and not
6516          * the offload processers, so make the chip do the pseudo-
6517          * header checksums on receive.  For transmit it is more
6518          * convenient to do the pseudo-header checksum in software
6519          * as Linux does that on transmit for us in all cases.
6520          */
6521         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6522
6523         tw32(GRC_MODE,
6524              tp->grc_mode |
6525              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6526
6527         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6528         val = tr32(GRC_MISC_CFG);
6529         val &= ~0xff;
6530         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6531         tw32(GRC_MISC_CFG, val);
6532
6533         /* Initialize MBUF/DESC pool. */
6534         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6535                 /* Do nothing.  */
6536         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6537                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6538                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6539                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6540                 else
6541                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6542                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6543                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6544         }
6545         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6546                 int fw_len;
6547
6548                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6549                           TG3_TSO5_FW_RODATA_LEN +
6550                           TG3_TSO5_FW_DATA_LEN +
6551                           TG3_TSO5_FW_SBSS_LEN +
6552                           TG3_TSO5_FW_BSS_LEN);
6553                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6554                 tw32(BUFMGR_MB_POOL_ADDR,
6555                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6556                 tw32(BUFMGR_MB_POOL_SIZE,
6557                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6558         }
6559
6560         if (tp->dev->mtu <= ETH_DATA_LEN) {
6561                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6562                      tp->bufmgr_config.mbuf_read_dma_low_water);
6563                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6564                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6565                 tw32(BUFMGR_MB_HIGH_WATER,
6566                      tp->bufmgr_config.mbuf_high_water);
6567         } else {
6568                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6569                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6570                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6571                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6572                 tw32(BUFMGR_MB_HIGH_WATER,
6573                      tp->bufmgr_config.mbuf_high_water_jumbo);
6574         }
6575         tw32(BUFMGR_DMA_LOW_WATER,
6576              tp->bufmgr_config.dma_low_water);
6577         tw32(BUFMGR_DMA_HIGH_WATER,
6578              tp->bufmgr_config.dma_high_water);
6579
6580         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6581         for (i = 0; i < 2000; i++) {
6582                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6583                         break;
6584                 udelay(10);
6585         }
6586         if (i >= 2000) {
6587                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6588                        tp->dev->name);
6589                 return -ENODEV;
6590         }
6591
6592         /* Setup replenish threshold. */
6593         val = tp->rx_pending / 8;
6594         if (val == 0)
6595                 val = 1;
6596         else if (val > tp->rx_std_max_post)
6597                 val = tp->rx_std_max_post;
6598         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6599                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6600                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6601
6602                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6603                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6604         }
6605
6606         tw32(RCVBDI_STD_THRESH, val);
6607
6608         /* Initialize TG3_BDINFO's at:
6609          *  RCVDBDI_STD_BD:     standard eth size rx ring
6610          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6611          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6612          *
6613          * like so:
6614          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6615          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6616          *                              ring attribute flags
6617          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6618          *
6619          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6620          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6621          *
6622          * The size of each ring is fixed in the firmware, but the location is
6623          * configurable.
6624          */
6625         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6626              ((u64) tp->rx_std_mapping >> 32));
6627         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6628              ((u64) tp->rx_std_mapping & 0xffffffff));
6629         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6630              NIC_SRAM_RX_BUFFER_DESC);
6631
6632         /* Don't even try to program the JUMBO/MINI buffer descriptor
6633          * configs on 5705.
6634          */
6635         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6636                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6637                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6638         } else {
6639                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6640                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6641
6642                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6643                      BDINFO_FLAGS_DISABLED);
6644
6645                 /* Setup replenish threshold. */
6646                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6647
6648                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6649                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6650                              ((u64) tp->rx_jumbo_mapping >> 32));
6651                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6652                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6653                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6654                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6655                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6656                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6657                 } else {
6658                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6659                              BDINFO_FLAGS_DISABLED);
6660                 }
6661
6662         }
6663
6664         /* There is only one send ring on 5705/5750, no need to explicitly
6665          * disable the others.
6666          */
6667         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6668                 /* Clear out send RCB ring in SRAM. */
6669                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6670                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6671                                       BDINFO_FLAGS_DISABLED);
6672         }
6673
6674         tp->tx_prod = 0;
6675         tp->tx_cons = 0;
6676         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6677         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6678
6679         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6680                        tp->tx_desc_mapping,
6681                        (TG3_TX_RING_SIZE <<
6682                         BDINFO_FLAGS_MAXLEN_SHIFT),
6683                        NIC_SRAM_TX_BUFFER_DESC);
6684
6685         /* There is only one receive return ring on 5705/5750, no need
6686          * to explicitly disable the others.
6687          */
6688         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6689                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6690                      i += TG3_BDINFO_SIZE) {
6691                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6692                                       BDINFO_FLAGS_DISABLED);
6693                 }
6694         }
6695
6696         tp->rx_rcb_ptr = 0;
6697         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6698
6699         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6700                        tp->rx_rcb_mapping,
6701                        (TG3_RX_RCB_RING_SIZE(tp) <<
6702                         BDINFO_FLAGS_MAXLEN_SHIFT),
6703                        0);
6704
6705         tp->rx_std_ptr = tp->rx_pending;
6706         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6707                      tp->rx_std_ptr);
6708
6709         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6710                                                 tp->rx_jumbo_pending : 0;
6711         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6712                      tp->rx_jumbo_ptr);
6713
6714         /* Initialize MAC address and backoff seed. */
6715         __tg3_set_mac_addr(tp, 0);
6716
6717         /* MTU + ethernet header + FCS + optional VLAN tag */
6718         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6719
6720         /* The slot time is changed by tg3_setup_phy if we
6721          * run at gigabit with half duplex.
6722          */
6723         tw32(MAC_TX_LENGTHS,
6724              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6725              (6 << TX_LENGTHS_IPG_SHIFT) |
6726              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6727
6728         /* Receive rules. */
6729         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6730         tw32(RCVLPC_CONFIG, 0x0181);
6731
6732         /* Calculate RDMAC_MODE setting early, we need it to determine
6733          * the RCVLPC_STATE_ENABLE mask.
6734          */
6735         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6736                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6737                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6738                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6739                       RDMAC_MODE_LNGREAD_ENAB);
6740
6741         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6742                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6743                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6744                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6745
6746         /* If statement applies to 5705 and 5750 PCI devices only */
6747         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6748              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6749             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6750                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6751                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6752                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6753                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6754                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6755                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6756                 }
6757         }
6758
6759         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6760                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6761
6762         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6763                 rdmac_mode |= (1 << 27);
6764
6765         /* Receive/send statistics. */
6766         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6767                 val = tr32(RCVLPC_STATS_ENABLE);
6768                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6769                 tw32(RCVLPC_STATS_ENABLE, val);
6770         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6771                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6772                 val = tr32(RCVLPC_STATS_ENABLE);
6773                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6774                 tw32(RCVLPC_STATS_ENABLE, val);
6775         } else {
6776                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6777         }
6778         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6779         tw32(SNDDATAI_STATSENAB, 0xffffff);
6780         tw32(SNDDATAI_STATSCTRL,
6781              (SNDDATAI_SCTRL_ENABLE |
6782               SNDDATAI_SCTRL_FASTUPD));
6783
6784         /* Setup host coalescing engine. */
6785         tw32(HOSTCC_MODE, 0);
6786         for (i = 0; i < 2000; i++) {
6787                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6788                         break;
6789                 udelay(10);
6790         }
6791
6792         __tg3_set_coalesce(tp, &tp->coal);
6793
6794         /* set status block DMA address */
6795         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6796              ((u64) tp->status_mapping >> 32));
6797         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6798              ((u64) tp->status_mapping & 0xffffffff));
6799
6800         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6801                 /* Status/statistics block address.  See tg3_timer,
6802                  * the tg3_periodic_fetch_stats call there, and
6803                  * tg3_get_stats to see how this works for 5705/5750 chips.
6804                  */
6805                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6806                      ((u64) tp->stats_mapping >> 32));
6807                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6808                      ((u64) tp->stats_mapping & 0xffffffff));
6809                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6810                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6811         }
6812
6813         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6814
6815         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6816         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6817         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6818                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6819
6820         /* Clear statistics/status block in chip, and status block in ram. */
6821         for (i = NIC_SRAM_STATS_BLK;
6822              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6823              i += sizeof(u32)) {
6824                 tg3_write_mem(tp, i, 0);
6825                 udelay(40);
6826         }
6827         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6828
6829         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6830                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6831                 /* reset to prevent losing 1st rx packet intermittently */
6832                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6833                 udelay(10);
6834         }
6835
6836         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6837                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6838         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6839             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6840             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6841                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6842         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6843         udelay(40);
6844
6845         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6846          * If TG3_FLG2_IS_NIC is zero, we should read the
6847          * register to preserve the GPIO settings for LOMs. The GPIOs,
6848          * whether used as inputs or outputs, are set by boot code after
6849          * reset.
6850          */
6851         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6852                 u32 gpio_mask;
6853
6854                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6855                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6856                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6857
6858                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6859                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6860                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6861
6862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6863                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6864
6865                 tp->grc_local_ctrl &= ~gpio_mask;
6866                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6867
6868                 /* GPIO1 must be driven high for eeprom write protect */
6869                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6870                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6871                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6872         }
6873         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6874         udelay(100);
6875
6876         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6877         tp->last_tag = 0;
6878
6879         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6880                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6881                 udelay(40);
6882         }
6883
6884         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6885                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6886                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6887                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6888                WDMAC_MODE_LNGREAD_ENAB);
6889
6890         /* If statement applies to 5705 and 5750 PCI devices only */
6891         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6892              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6894                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6895                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6896                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6897                         /* nothing */
6898                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6899                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6900                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6901                         val |= WDMAC_MODE_RX_ACCEL;
6902                 }
6903         }
6904
6905         /* Enable host coalescing bug fix */
6906         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6907             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6908             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6909             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6910                 val |= (1 << 29);
6911
6912         tw32_f(WDMAC_MODE, val);
6913         udelay(40);
6914
6915         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6916                 u16 pcix_cmd;
6917
6918                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6919                                      &pcix_cmd);
6920                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6921                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6922                         pcix_cmd |= PCI_X_CMD_READ_2K;
6923                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6924                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6925                         pcix_cmd |= PCI_X_CMD_READ_2K;
6926                 }
6927                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6928                                       pcix_cmd);
6929         }
6930
6931         tw32_f(RDMAC_MODE, rdmac_mode);
6932         udelay(40);
6933
6934         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6935         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6936                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6937
6938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6939                 tw32(SNDDATAC_MODE,
6940                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6941         else
6942                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6943
6944         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6945         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6946         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6947         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6948         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6949                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6950         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6951         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6952
6953         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6954                 err = tg3_load_5701_a0_firmware_fix(tp);
6955                 if (err)
6956                         return err;
6957         }
6958
6959         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6960                 err = tg3_load_tso_firmware(tp);
6961                 if (err)
6962                         return err;
6963         }
6964
6965         tp->tx_mode = TX_MODE_ENABLE;
6966         tw32_f(MAC_TX_MODE, tp->tx_mode);
6967         udelay(100);
6968
6969         tp->rx_mode = RX_MODE_ENABLE;
6970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6971             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6972                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6973
6974         tw32_f(MAC_RX_MODE, tp->rx_mode);
6975         udelay(10);
6976
6977         if (tp->link_config.phy_is_low_power) {
6978                 tp->link_config.phy_is_low_power = 0;
6979                 tp->link_config.speed = tp->link_config.orig_speed;
6980                 tp->link_config.duplex = tp->link_config.orig_duplex;
6981                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6982         }
6983
6984         tp->mi_mode = MAC_MI_MODE_BASE;
6985         tw32_f(MAC_MI_MODE, tp->mi_mode);
6986         udelay(80);
6987
6988         tw32(MAC_LED_CTRL, tp->led_ctrl);
6989
6990         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6991         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6992                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6993                 udelay(10);
6994         }
6995         tw32_f(MAC_RX_MODE, tp->rx_mode);
6996         udelay(10);
6997
6998         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6999                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7000                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7001                         /* Set drive transmission level to 1.2V  */
7002                         /* only if the signal pre-emphasis bit is not set  */
7003                         val = tr32(MAC_SERDES_CFG);
7004                         val &= 0xfffff000;
7005                         val |= 0x880;
7006                         tw32(MAC_SERDES_CFG, val);
7007                 }
7008                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7009                         tw32(MAC_SERDES_CFG, 0x616000);
7010         }
7011
7012         /* Prevent chip from dropping frames when flow control
7013          * is enabled.
7014          */
7015         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7016
7017         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7018             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7019                 /* Use hardware link auto-negotiation */
7020                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7021         }
7022
7023         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7024             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7025                 u32 tmp;
7026
7027                 tmp = tr32(SERDES_RX_CTRL);
7028                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7029                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7030                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7031                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7032         }
7033
7034         err = tg3_setup_phy(tp, 0);
7035         if (err)
7036                 return err;
7037
7038         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7039             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7040                 u32 tmp;
7041
7042                 /* Clear CRC stats. */
7043                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7044                         tg3_writephy(tp, MII_TG3_TEST1,
7045                                      tmp | MII_TG3_TEST1_CRC_EN);
7046                         tg3_readphy(tp, 0x14, &tmp);
7047                 }
7048         }
7049
7050         __tg3_set_rx_mode(tp->dev);
7051
7052         /* Initialize receive rules. */
7053         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7054         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7055         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7056         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7057
7058         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7059             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7060                 limit = 8;
7061         else
7062                 limit = 16;
7063         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7064                 limit -= 4;
7065         switch (limit) {
7066         case 16:
7067                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7068         case 15:
7069                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7070         case 14:
7071                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7072         case 13:
7073                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7074         case 12:
7075                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7076         case 11:
7077                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7078         case 10:
7079                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7080         case 9:
7081                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7082         case 8:
7083                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7084         case 7:
7085                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7086         case 6:
7087                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7088         case 5:
7089                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7090         case 4:
7091                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7092         case 3:
7093                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7094         case 2:
7095         case 1:
7096
7097         default:
7098                 break;
7099         };
7100
7101         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7102                 /* Write our heartbeat update interval to APE. */
7103                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7104                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7105
7106         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7107
7108         return 0;
7109 }
7110
7111 /* Called at device open time to get the chip ready for
7112  * packet processing.  Invoked with tp->lock held.
7113  */
7114 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7115 {
7116         int err;
7117
7118         /* Force the chip into D0. */
7119         err = tg3_set_power_state(tp, PCI_D0);
7120         if (err)
7121                 goto out;
7122
7123         tg3_switch_clocks(tp);
7124
7125         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7126
7127         err = tg3_reset_hw(tp, reset_phy);
7128
7129 out:
7130         return err;
7131 }
7132
/* Accumulate the 32-bit hardware statistics register REG into the
 * 64-bit software counter PSTAT.  The low word is summed and, when the
 * unsigned addition wraps (new low < addend), a carry is propagated
 * into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7139
/* Fold the chip's 32-bit MAC and receive-list statistics registers into
 * the 64-bit counters of the host statistics block.  Called once per
 * second from tg3_timer() on 5705+ chips; skipped while the link is
 * down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7180
/* Driver heartbeat, rearmed every tp->timer_offset jiffies.  Works
 * around the race-prone non-tagged interrupt protocol on every tick,
 * does link polling and statistics fetching once a second, and sends
 * the ASF firmware keep-alive every two seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being synchronized (irq_sync set); stay away
	 * from the hardware and just rearm ourselves.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine has shut itself off, the chip
		 * needs a full reset; do it from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Poll MAC_STATUS for a link change, either via the
			 * MI interrupt bit or the link-state-changed bit.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the state changed, or link was
			 * down and the SERDES now sees sync/signal: rerun
			 * PHY setup.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Momentarily clear the port mode
					 * bits, presumably to kick the
					 * SERDES state machine -- verify.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			/* Bit 14 notifies the RX CPU of the new mailbox
			 * command -- magic constant with no define in
			 * this tree; NOTE(review): give it a name.
			 */
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7300
7301 static int tg3_request_irq(struct tg3 *tp)
7302 {
7303         irq_handler_t fn;
7304         unsigned long flags;
7305         struct net_device *dev = tp->dev;
7306
7307         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7308                 fn = tg3_msi;
7309                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7310                         fn = tg3_msi_1shot;
7311                 flags = IRQF_SAMPLE_RANDOM;
7312         } else {
7313                 fn = tg3_interrupt;
7314                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7315                         fn = tg3_interrupt_tagged;
7316                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7317         }
7318         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7319 }
7320
/* Verify that the chip can actually deliver an interrupt on the current
 * vector (used by tg3_test_msi()).  Temporarily installs tg3_test_isr,
 * forces an immediate interrupt through the host coalescing engine, and
 * polls for up to ~50ms (5 x 10ms) for evidence of delivery.  The
 * normal handler is reinstalled before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the normal handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		/* NOTE(review): failing here leaves the device with no
		 * handler installed at all -- confirm callers cope.
		 */
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to fire an interrupt right now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Delivery is evidenced by a non-zero interrupt mailbox
		 * or by the PCI interrupt having been masked off.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Reinstall the normal handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7374
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless we are actually using MSI. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR setting). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* tg3_request_irq() now picks the INTx handler since USING_MSI
	 * is cleared.
	 */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7435
/* Bring the interface up: power the chip to D0, allocate descriptor
 * memory, enable MSI when supported, install the IRQ handler,
 * initialize the hardware, verify MSI delivery, and start the driver
 * heartbeat timer before enabling interrupts and the TX queue.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* With tagged status the heartbeat runs every second;
		 * the race-prone non-tagged protocol is ticked every
		 * 100ms (see tg3_timer()).  The multipliers scale the
		 * per-tick counters to once-a-second and every-2s work.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Prove MSI delivery works; tg3_test_msi() falls back to
		 * INTx on its own when it can.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7571
#if 0
/* Debug-only chip state dump, compiled out by the #if 0 above.  Prints
 * the PCI status words, every major MAC/DMA/host-coalescing control
 * block, the NIC-side TX/RX descriptor rings and the software status
 * and statistics blocks to the kernel log.  Kept for bring-up
 * debugging; tg3_close() contains a matching disabled call site.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7799
7800 static struct net_device_stats *tg3_get_stats(struct net_device *);
7801 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7802
/* Inverse of tg3_open(): quiesce NAPI, the reset worker and the TX
 * queue, kill the heartbeat timer, reset the chip, release the IRQ
 * (and MSI vector) and descriptor memory, snapshot the statistics so
 * they survive the down/up cycle, then put the chip into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Ensure a queued reset_task cannot run during/after teardown. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the final counters; ESTAT_ADD() folds estats_prev
	 * back into future statistics reads after the next open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7846
7847 static inline unsigned long get_stat64(tg3_stat64_t *val)
7848 {
7849         unsigned long ret;
7850
7851 #if (BITS_PER_LONG == 32)
7852         ret = val->low;
7853 #else
7854         ret = ((u64)val->high << 32) | ((u64)val->low);
7855 #endif
7856         return ret;
7857 }
7858
/* Return the accumulated CRC error count.  On 5700/5701 with a copper
 * PHY the count is read from the PHY's own CRC test counter (enabled
 * via MII_TG3_TEST1) and accumulated in software; all other chips use
 * the MAC's rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* PHY register 0x14 holds the CRC error count once
			 * CRC_EN is set -- magic offset with no define in
			 * this tree; NOTE(review): give it a name.
			 */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7884
/* Fold one hardware counter into the ethtool stats snapshot:
 * the value saved before the last chip reset (old_estats) plus the
 * live hardware counter, so totals survive down/up cycles.
 * Expects estats, old_estats and hw_stats in the caller's scope.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
7888
/* Build the ethtool statistics snapshot in tp->estats.
 *
 * Each field is the sum of the value saved at the last close
 * (tp->estats_prev) and the live hardware counter (see ESTAT_ADD),
 * so the reported numbers keep growing across interface down/up
 * cycles.  If the hardware stats block is not mapped, the previous
 * snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7976
/* ndo get_stats hook: derive struct net_device_stats from the
 * hardware statistics block.  As in tg3_get_estats(), each field is
 * the value saved at the last close (tp->net_stats_prev) plus the
 * live hardware counter.  Falls back to the saved stats when the
 * hardware stats block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts;
	 * calc_crc_errors() handles both sources.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8036
8037 static inline u32 calc_crc(unsigned char *buf, int len)
8038 {
8039         u32 reg;
8040         u32 tmp;
8041         int j, k;
8042
8043         reg = 0xffffffff;
8044
8045         for (j = 0; j < len; j++) {
8046                 reg ^= buf[j];
8047
8048                 for (k = 0; k < 8; k++) {
8049                         tmp = reg & 0x01;
8050
8051                         reg >>= 1;
8052
8053                         if (tmp) {
8054                                 reg ^= 0xedb88320;
8055                         }
8056                 }
8057         }
8058
8059         return ~reg;
8060 }
8061
8062 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8063 {
8064         /* accept or reject all multicast frames */
8065         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8066         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8067         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8068         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8069 }
8070
/* Program the MAC RX mode and multicast hash filter from dev->flags
 * and the device multicast list.  Caller must hold the full lock
 * (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash index is the low 7 bits of the inverted
			 * CRC-32 of the MAC address: bits 6:5 select one
			 * of the four hash registers, bits 4:0 the bit
			 * within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when the computed mode changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8134
/* ndo set_rx_mode hook: apply the current RX filtering configuration
 * under the full lock.  A no-op while the interface is down; the mode
 * is programmed again on open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8146
8147 #define TG3_REGDUMP_LEN         (32 * 1024)
8148
/* ethtool get_regs_len: size of the register dump buffer that
 * tg3_get_regs() fills.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8153
/* ethtool get_regs: dump TG3_REGDUMP_LEN bytes of register space.
 *
 * Each GET_REG32_LOOP/GET_REG32_1 stores values at the same offset in
 * the output buffer as the register's offset in BAR0, so gaps between
 * the dumped ranges stay zero from the initial memset.  The dump is
 * skipped entirely while the PHY is in low-power state (registers
 * would not be readable).
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only present when the part has NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8226
/* ethtool get_eeprom_len: size in bytes of the NVRAM exposed through
 * tg3_get_eeprom()/tg3_set_eeprom().
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8233
8234 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8235 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8236 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8237
/* ethtool get_eeprom: read an arbitrary byte range from NVRAM.
 *
 * NVRAM is accessed in 4-byte words, so the request is split into an
 * unaligned head, a run of whole words, and an unaligned tail.
 * eeprom->len is updated with the bytes actually copied, including on
 * a partial failure mid-transfer.  Returns 0 or a negative errno;
 * -EAGAIN while the PHY is in low-power state.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8297
8298 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8299
/* ethtool set_eeprom: write an arbitrary byte range to NVRAM.
 *
 * NVRAM writes must be 4-byte aligned, so when the request is not,
 * the bordering words are read first (start and/or end) and merged
 * with the caller's data in a temporary buffer before the block
 * write.  Returns 0 or a negative errno; -EAGAIN while the PHY is in
 * low-power state, -EINVAL on a bad magic.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved boundary word(s) with the user
		 * data in a scratch buffer sized to the aligned span.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8357
/* ethtool get_settings: report supported link modes, port type and
 * the current link configuration.  Active speed/duplex are only
 * filled in while the interface is running.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* SERDES parts are fiber; everything else is twisted pair. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8392
8393 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8394 {
8395         struct tg3 *tp = netdev_priv(dev);
8396
8397         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8398                 /* These are the only valid advertisement bits allowed.  */
8399                 if (cmd->autoneg == AUTONEG_ENABLE &&
8400                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8401                                           ADVERTISED_1000baseT_Full |
8402                                           ADVERTISED_Autoneg |
8403                                           ADVERTISED_FIBRE)))
8404                         return -EINVAL;
8405                 /* Fiber can only do SPEED_1000.  */
8406                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8407                          (cmd->speed != SPEED_1000))
8408                         return -EINVAL;
8409         /* Copper cannot force SPEED_1000.  */
8410         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8411                    (cmd->speed == SPEED_1000))
8412                 return -EINVAL;
8413         else if ((cmd->speed == SPEED_1000) &&
8414                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8415                 return -EINVAL;
8416
8417         tg3_full_lock(tp, 0);
8418
8419         tp->link_config.autoneg = cmd->autoneg;
8420         if (cmd->autoneg == AUTONEG_ENABLE) {
8421                 tp->link_config.advertising = (cmd->advertising |
8422                                               ADVERTISED_Autoneg);
8423                 tp->link_config.speed = SPEED_INVALID;
8424                 tp->link_config.duplex = DUPLEX_INVALID;
8425         } else {
8426                 tp->link_config.advertising = 0;
8427                 tp->link_config.speed = cmd->speed;
8428                 tp->link_config.duplex = cmd->duplex;
8429         }
8430
8431         tp->link_config.orig_speed = tp->link_config.speed;
8432         tp->link_config.orig_duplex = tp->link_config.duplex;
8433         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8434
8435         if (netif_running(dev))
8436                 tg3_setup_phy(tp, 1);
8437
8438         tg3_full_unlock(tp);
8439
8440         return 0;
8441 }
8442
8443 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8444 {
8445         struct tg3 *tp = netdev_priv(dev);
8446
8447         strcpy(info->driver, DRV_MODULE_NAME);
8448         strcpy(info->version, DRV_MODULE_VERSION);
8449         strcpy(info->fw_version, tp->fw_ver);
8450         strcpy(info->bus_info, pci_name(tp->pdev));
8451 }
8452
8453 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8454 {
8455         struct tg3 *tp = netdev_priv(dev);
8456
8457         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8458                 wol->supported = WAKE_MAGIC;
8459         else
8460                 wol->supported = 0;
8461         wol->wolopts = 0;
8462         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8463                 wol->wolopts = WAKE_MAGIC;
8464         memset(&wol->sopass, 0, sizeof(wol->sopass));
8465 }
8466
8467 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8468 {
8469         struct tg3 *tp = netdev_priv(dev);
8470
8471         if (wol->wolopts & ~WAKE_MAGIC)
8472                 return -EINVAL;
8473         if ((wol->wolopts & WAKE_MAGIC) &&
8474             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8475                 return -EINVAL;
8476
8477         spin_lock_bh(&tp->lock);
8478         if (wol->wolopts & WAKE_MAGIC)
8479                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8480         else
8481                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8482         spin_unlock_bh(&tp->lock);
8483
8484         return 0;
8485 }
8486
/* ethtool get_msglevel: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
8492
/* ethtool set_msglevel: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
8498
/* ethtool set_tso: enable or disable TCP segmentation offload.
 *
 * Parts without TSO support only accept value == 0.  On HW_TSO_2
 * parts other than the 5906, TSO6 (and on the 5761 also TSO_ECN) is
 * toggled together with plain TSO.  Returns 0 or -EINVAL.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8519
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 *
 * Not applicable to SERDES ports.  Restarting is only performed when
 * the device is up and autoneg is currently enabled in BMCR (or
 * parallel detection is active).  Returns 0 on success, -EAGAIN if
 * the device is down, -EINVAL otherwise.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result is
	 * discarded — presumably a dummy read to flush a stale latched
	 * value; confirm before simplifying.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8546
8547 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8548 {
8549         struct tg3 *tp = netdev_priv(dev);
8550
8551         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8552         ering->rx_mini_max_pending = 0;
8553         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8554                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8555         else
8556                 ering->rx_jumbo_max_pending = 0;
8557
8558         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8559
8560         ering->rx_pending = tp->rx_pending;
8561         ering->rx_mini_pending = 0;
8562         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8563                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8564         else
8565                 ering->rx_jumbo_pending = 0;
8566
8567         ering->tx_pending = tp->tx_pending;
8568 }
8569
/* ethtool set_ringparam: resize the RX/TX rings.
 *
 * Rejects sizes beyond the hardware ring limits, TX rings too small
 * to hold a maximally-fragmented skb, and (on TSO_BUG parts) TX rings
 * of 3 * MAX_SKB_FRAGS entries or fewer.  If the device is running,
 * the hardware is halted and restarted with the new sizes.  Returns 0
 * or a negative errno from the restart.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some parts can post at most 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8609
8610 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8611 {
8612         struct tg3 *tp = netdev_priv(dev);
8613
8614         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8615
8616         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8617                 epause->rx_pause = 1;
8618         else
8619                 epause->rx_pause = 0;
8620
8621         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8622                 epause->tx_pause = 1;
8623         else
8624                 epause->tx_pause = 0;
8625 }
8626
/* ethtool set_pauseparam: configure pause autonegotiation and the
 * desired RX/TX flow control.  If the device is running, the hardware
 * is halted and restarted so the new settings take effect.  Returns 0
 * or a negative errno from the restart.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
	if (epause->tx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8663
/* ethtool get_rx_csum: report whether RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
8669
/* ethtool set_rx_csum: enable/disable RX checksum offload.  Parts
 * with broken checksum hardware only accept data == 0.  Returns 0 or
 * -EINVAL.
 */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}
8689
/* ethtool set_tx_csum: enable/disable TX checksum offload.  Parts
 * with broken checksum hardware only accept data == 0.  The 5755,
 * 5787, 5784 and 5761 can also checksum IPv6, so they use the
 * ipv6-capable helper.  Returns 0 or -EINVAL.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
8710
8711 static int tg3_get_sset_count (struct net_device *dev, int sset)
8712 {
8713         switch (sset) {
8714         case ETH_SS_TEST:
8715                 return TG3_NUM_TEST;
8716         case ETH_SS_STATS:
8717                 return TG3_NUM_STATS;
8718         default:
8719                 return -EOPNOTSUPP;
8720         }
8721 }
8722
/* ethtool get_strings: copy out the label table for the requested
 * string set (statistics keys or self-test names).
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
8737
/* ethtool phys_id: blink the port LEDs to identify the adapter.
 *
 * Blinks for @data seconds (default 2), toggling every 500 ms between
 * all-LEDs-on and all-LEDs-off, then restores the configured LED
 * mode.  Interruptible; requires the interface to be up.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore the normal LED configuration. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8769
/* ethtool get_ethtool_stats handler: copy the driver's statistics
 * block (refreshed by tg3_get_estats()) into the caller's u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8776
8777 #define NVRAM_TEST_SIZE 0x100
8778 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
8779 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
8780 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
8781 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8782 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8783
/* Validate the NVRAM/EEPROM image.
 *
 * The magic word at offset 0 classifies the image (standard EEPROM,
 * firmware selfboot format 1, or hardware selfboot), which determines
 * how many bytes to read and which checksum scheme to apply:
 *   - firmware selfboot images must have a zero 8-bit byte sum
 *     (format 1 rev 2 excludes the 4-byte MBA field from the sum);
 *   - hardware selfboot images carry packed parity bits that are
 *     separated out and checked against the data bytes;
 *   - standard images carry CRCs at offsets 0x10 and 0xfc.
 *
 * Returns 0 when the image checks out, -EIO on a read or checksum
 * failure, -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick the number of bytes covered by the checksum. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can verify. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into the scratch buffer, one word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image byte-sums to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each pack 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 packs 6 parity bits ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and byte 17 packs 8 more. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must total odd parity:
		 * an odd-weight byte needs a clear bit, an even-weight byte
		 * needs a set bit.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8917
8918 #define TG3_SERDES_TIMEOUT_SEC  2
8919 #define TG3_COPPER_TIMEOUT_SEC  6
8920
8921 static int tg3_test_link(struct tg3 *tp)
8922 {
8923         int i, max;
8924
8925         if (!netif_running(tp->dev))
8926                 return -ENODEV;
8927
8928         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8929                 max = TG3_SERDES_TIMEOUT_SEC;
8930         else
8931                 max = TG3_COPPER_TIMEOUT_SEC;
8932
8933         for (i = 0; i < max; i++) {
8934                 if (netif_carrier_ok(tp->dev))
8935                         return 0;
8936
8937                 if (msleep_interruptible(1000))
8938                         break;
8939         }
8940
8941         return -EIO;
8942 }
8943
/* Only test the commonly used registers.
 *
 * For each table entry that applies to this chip the test saves the
 * register, writes zero and then (read_mask | write_mask), checking
 * after each write that the read-only bits (read_mask) kept their
 * saved value and the read/write bits (write_mask) took the written
 * value.  The saved value is restored on both success and failure.
 * Returns 0 on success, -EIO on the first mismatch (logging the
 * failing offset when netif_msg_hw() is enabled).
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* TG3_FL_* applicability flags, below */
#define TG3_FL_5705	0x1	/* test only on 5705-class chips */
#define TG3_FL_NOT_5705	0x2	/* skip on 5705-class chips */
#define TG3_FL_NOT_5788	0x4	/* skip on 5788 */
#define TG3_FL_NOT_5750	0x8	/* skip on 5750-class chips */
		u32 read_mask;	/* read-only bits that must survive writes */
		u32 write_mask;	/* read/write bits that must be writable */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip class. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Mismatch: report the failing offset and restore the register. */
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9164
9165 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9166 {
9167         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9168         int i;
9169         u32 j;
9170
9171         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9172                 for (j = 0; j < len; j += 4) {
9173                         u32 val;
9174
9175                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9176                         tg3_read_mem(tp, offset + j, &val);
9177                         if (val != test_pattern[i])
9178                                 return -EIO;
9179                 }
9180         }
9181         return 0;
9182 }
9183
9184 static int tg3_test_memory(struct tg3 *tp)
9185 {
9186         static struct mem_entry {
9187                 u32 offset;
9188                 u32 len;
9189         } mem_tbl_570x[] = {
9190                 { 0x00000000, 0x00b50},
9191                 { 0x00002000, 0x1c000},
9192                 { 0xffffffff, 0x00000}
9193         }, mem_tbl_5705[] = {
9194                 { 0x00000100, 0x0000c},
9195                 { 0x00000200, 0x00008},
9196                 { 0x00004000, 0x00800},
9197                 { 0x00006000, 0x01000},
9198                 { 0x00008000, 0x02000},
9199                 { 0x00010000, 0x0e000},
9200                 { 0xffffffff, 0x00000}
9201         }, mem_tbl_5755[] = {
9202                 { 0x00000200, 0x00008},
9203                 { 0x00004000, 0x00800},
9204                 { 0x00006000, 0x00800},
9205                 { 0x00008000, 0x02000},
9206                 { 0x00010000, 0x0c000},
9207                 { 0xffffffff, 0x00000}
9208         }, mem_tbl_5906[] = {
9209                 { 0x00000200, 0x00008},
9210                 { 0x00004000, 0x00400},
9211                 { 0x00006000, 0x00400},
9212                 { 0x00008000, 0x01000},
9213                 { 0x00010000, 0x01000},
9214                 { 0xffffffff, 0x00000}
9215         };
9216         struct mem_entry *mem_tbl;
9217         int err = 0;
9218         int i;
9219
9220         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9221                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9222                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9223                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9224                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9225                         mem_tbl = mem_tbl_5755;
9226                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9227                         mem_tbl = mem_tbl_5906;
9228                 else
9229                         mem_tbl = mem_tbl_5705;
9230         } else
9231                 mem_tbl = mem_tbl_570x;
9232
9233         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9234                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9235                     mem_tbl[i].len)) != 0)
9236                         break;
9237         }
9238
9239         return err;
9240 }
9241
9242 #define TG3_MAC_LOOPBACK        0
9243 #define TG3_PHY_LOOPBACK        1
9244
/* Send one self-addressed test frame and verify it is received intact
 * through the selected loopback path.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK programs the MAC for internal
 *	loopback; TG3_PHY_LOOPBACK puts the PHY into loopback via BMCR.
 *
 * Returns 0 when the frame comes back unchanged, -EIO on timeout or
 * any content/descriptor mismatch, -ENOMEM if the test skb cannot be
 * allocated, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* Open the shadow register window, clear
				 * bit 0x20 of PHY register 0x1b, then close
				 * the window again.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, 8 zero bytes, then an
	 * incrementing byte pattern we can verify on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer stands before we transmit. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX side must have consumed the frame ... */
	if (tx_idx != tp->tx_prod)
		goto out;

	/* ... and the RX side must have produced exactly one completion. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Drop the trailing 4 bytes (presumably FCS — matches the MTU of
	 * tx_len + 4 programmed above) and require an exact length match.
	 */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern past the 14-byte Ethernet header. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9412
9413 #define TG3_MAC_LOOPBACK_FAILED         1
9414 #define TG3_PHY_LOOPBACK_FAILED         2
9415 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9416                                          TG3_PHY_LOOPBACK_FAILED)
9417
/* Run the MAC loopback test and, unless the port uses a PHY SERDES,
 * the PHY loopback test.  Returns a bitmask of TG3_MAC_LOOPBACK_FAILED
 * / TG3_PHY_LOOPBACK_FAILED, or 0 on success.
 *
 * On chips flagged TG3_FLG3_5761_5784_AX_FIXES the CPMU link-speed /
 * link-aware power-management modes are disabled around the MAC test
 * (under the CPMU hardware mutex) and restored afterwards.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Bring the chip to a known state before looping packets. */
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off power management based on link speed. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9471
/* ethtool self_test handler.
 *
 * Always runs the NVRAM (data[0]) and link (data[1]) tests.  When
 * ETH_TEST_FL_OFFLINE is requested it halts the chip and additionally
 * runs the register (data[2]), memory (data[3]), loopback (data[4],
 * a TG3_*_LOOPBACK_FAILED bitmask) and interrupt (data[5]) tests,
 * restarting the hardware afterwards.  Any failure sets
 * ETH_TEST_FL_FAILED in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip for the duration of the tests. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its CPUs before poking registers. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs with the full lock released. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Shut down and, if the interface is up, restart the chip. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Drop back to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9544
/* Net-device ioctl handler.  Supports the standard MII ioctls
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG); everything else, and any
 * MII access on SERDES parts (which have no copper PHY), returns
 * -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* MDIO is not usable while the chip is powered down. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9596
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new VLAN group and reprogram the
 * receive mode so the chip keeps or strips VLAN tags accordingly.  The
 * device is quiesced while the group pointer is swapped.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9618
9619 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9620 {
9621         struct tg3 *tp = netdev_priv(dev);
9622
9623         memcpy(ec, &tp->coal, sizeof(*ec));
9624         return 0;
9625 }
9626
9627 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9628 {
9629         struct tg3 *tp = netdev_priv(dev);
9630         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9631         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9632
9633         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9634                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9635                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9636                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9637                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9638         }
9639
9640         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9641             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9642             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9643             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9644             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9645             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9646             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9647             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9648             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9649             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9650                 return -EINVAL;
9651
9652         /* No rx interrupts will be generated if both are zero */
9653         if ((ec->rx_coalesce_usecs == 0) &&
9654             (ec->rx_max_coalesced_frames == 0))
9655                 return -EINVAL;
9656
9657         /* No tx interrupts will be generated if both are zero */
9658         if ((ec->tx_coalesce_usecs == 0) &&
9659             (ec->tx_max_coalesced_frames == 0))
9660                 return -EINVAL;
9661
9662         /* Only copy relevant parameters, ignore all others. */
9663         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9664         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9665         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9666         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9667         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9668         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9669         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9670         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9671         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9672
9673         if (netif_running(dev)) {
9674                 tg3_full_lock(tp, 0);
9675                 __tg3_set_coalesce(tp, &tp->coal);
9676                 tg3_full_unlock(tp);
9677         }
9678         return 0;
9679 }
9680
/* ethtool entry points for the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9713
9714 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9715 {
9716         u32 cursize, val, magic;
9717
9718         tp->nvram_size = EEPROM_CHIP_SIZE;
9719
9720         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9721                 return;
9722
9723         if ((magic != TG3_EEPROM_MAGIC) &&
9724             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9725             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9726                 return;
9727
9728         /*
9729          * Size the chip by reading offsets at increasing powers of two.
9730          * When we encounter our validation signature, we know the addressing
9731          * has wrapped around, and thus have our chip size.
9732          */
9733         cursize = 0x10;
9734
9735         while (cursize < tp->nvram_size) {
9736                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9737                         return;
9738
9739                 if (val == magic)
9740                         break;
9741
9742                 cursize <<= 1;
9743         }
9744
9745         tp->nvram_size = cursize;
9746 }
9747
9748 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9749 {
9750         u32 val;
9751
9752         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9753                 return;
9754
9755         /* Selfboot format */
9756         if (val != TG3_EEPROM_MAGIC) {
9757                 tg3_get_eeprom_size(tp);
9758                 return;
9759         }
9760
9761         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9762                 if (val != 0) {
9763                         tp->nvram_size = (val >> 16) * 1024;
9764                         return;
9765                 }
9766         }
9767         tp->nvram_size = 0x80000;
9768 }
9769
9770 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9771 {
9772         u32 nvcfg1;
9773
9774         nvcfg1 = tr32(NVRAM_CFG1);
9775         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9776                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9777         }
9778         else {
9779                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9780                 tw32(NVRAM_CFG1, nvcfg1);
9781         }
9782
9783         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9784             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9785                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9786                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9787                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9788                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9789                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9790                                 break;
9791                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9792                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9793                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9794                                 break;
9795                         case FLASH_VENDOR_ATMEL_EEPROM:
9796                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9797                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9798                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9799                                 break;
9800                         case FLASH_VENDOR_ST:
9801                                 tp->nvram_jedecnum = JEDEC_ST;
9802                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9803                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9804                                 break;
9805                         case FLASH_VENDOR_SAIFUN:
9806                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9807                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9808                                 break;
9809                         case FLASH_VENDOR_SST_SMALL:
9810                         case FLASH_VENDOR_SST_LARGE:
9811                                 tp->nvram_jedecnum = JEDEC_SST;
9812                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9813                                 break;
9814                 }
9815         }
9816         else {
9817                 tp->nvram_jedecnum = JEDEC_ATMEL;
9818                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9819                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9820         }
9821 }
9822
9823 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9824 {
9825         u32 nvcfg1;
9826
9827         nvcfg1 = tr32(NVRAM_CFG1);
9828
9829         /* NVRAM protection for TPM */
9830         if (nvcfg1 & (1 << 27))
9831                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9832
9833         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9834                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9835                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9836                         tp->nvram_jedecnum = JEDEC_ATMEL;
9837                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9838                         break;
9839                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9840                         tp->nvram_jedecnum = JEDEC_ATMEL;
9841                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9842                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9843                         break;
9844                 case FLASH_5752VENDOR_ST_M45PE10:
9845                 case FLASH_5752VENDOR_ST_M45PE20:
9846                 case FLASH_5752VENDOR_ST_M45PE40:
9847                         tp->nvram_jedecnum = JEDEC_ST;
9848                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9849                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9850                         break;
9851         }
9852
9853         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9854                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9855                         case FLASH_5752PAGE_SIZE_256:
9856                                 tp->nvram_pagesize = 256;
9857                                 break;
9858                         case FLASH_5752PAGE_SIZE_512:
9859                                 tp->nvram_pagesize = 512;
9860                                 break;
9861                         case FLASH_5752PAGE_SIZE_1K:
9862                                 tp->nvram_pagesize = 1024;
9863                                 break;
9864                         case FLASH_5752PAGE_SIZE_2K:
9865                                 tp->nvram_pagesize = 2048;
9866                                 break;
9867                         case FLASH_5752PAGE_SIZE_4K:
9868                                 tp->nvram_pagesize = 4096;
9869                                 break;
9870                         case FLASH_5752PAGE_SIZE_264:
9871                                 tp->nvram_pagesize = 264;
9872                                 break;
9873                 }
9874         }
9875         else {
9876                 /* For eeprom, set pagesize to maximum eeprom size */
9877                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9878
9879                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9880                 tw32(NVRAM_CFG1, nvcfg1);
9881         }
9882 }
9883
9884 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9885 {
9886         u32 nvcfg1, protect = 0;
9887
9888         nvcfg1 = tr32(NVRAM_CFG1);
9889
9890         /* NVRAM protection for TPM */
9891         if (nvcfg1 & (1 << 27)) {
9892                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9893                 protect = 1;
9894         }
9895
9896         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9897         switch (nvcfg1) {
9898                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9899                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9900                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9901                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9902                         tp->nvram_jedecnum = JEDEC_ATMEL;
9903                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9904                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9905                         tp->nvram_pagesize = 264;
9906                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9907                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9908                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9909                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9910                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9911                         else
9912                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9913                         break;
9914                 case FLASH_5752VENDOR_ST_M45PE10:
9915                 case FLASH_5752VENDOR_ST_M45PE20:
9916                 case FLASH_5752VENDOR_ST_M45PE40:
9917                         tp->nvram_jedecnum = JEDEC_ST;
9918                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9919                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9920                         tp->nvram_pagesize = 256;
9921                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9922                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9923                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9924                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9925                         else
9926                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9927                         break;
9928         }
9929 }
9930
9931 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9932 {
9933         u32 nvcfg1;
9934
9935         nvcfg1 = tr32(NVRAM_CFG1);
9936
9937         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9938                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9939                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9940                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9941                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9942                         tp->nvram_jedecnum = JEDEC_ATMEL;
9943                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9944                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9945
9946                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9947                         tw32(NVRAM_CFG1, nvcfg1);
9948                         break;
9949                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9950                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9951                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9952                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9953                         tp->nvram_jedecnum = JEDEC_ATMEL;
9954                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9955                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9956                         tp->nvram_pagesize = 264;
9957                         break;
9958                 case FLASH_5752VENDOR_ST_M45PE10:
9959                 case FLASH_5752VENDOR_ST_M45PE20:
9960                 case FLASH_5752VENDOR_ST_M45PE40:
9961                         tp->nvram_jedecnum = JEDEC_ST;
9962                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9963                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9964                         tp->nvram_pagesize = 256;
9965                         break;
9966         }
9967 }
9968
9969 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9970 {
9971         u32 nvcfg1, protect = 0;
9972
9973         nvcfg1 = tr32(NVRAM_CFG1);
9974
9975         /* NVRAM protection for TPM */
9976         if (nvcfg1 & (1 << 27)) {
9977                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9978                 protect = 1;
9979         }
9980
9981         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9982         switch (nvcfg1) {
9983                 case FLASH_5761VENDOR_ATMEL_ADB021D:
9984                 case FLASH_5761VENDOR_ATMEL_ADB041D:
9985                 case FLASH_5761VENDOR_ATMEL_ADB081D:
9986                 case FLASH_5761VENDOR_ATMEL_ADB161D:
9987                 case FLASH_5761VENDOR_ATMEL_MDB021D:
9988                 case FLASH_5761VENDOR_ATMEL_MDB041D:
9989                 case FLASH_5761VENDOR_ATMEL_MDB081D:
9990                 case FLASH_5761VENDOR_ATMEL_MDB161D:
9991                         tp->nvram_jedecnum = JEDEC_ATMEL;
9992                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9993                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9994                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9995                         tp->nvram_pagesize = 256;
9996                         break;
9997                 case FLASH_5761VENDOR_ST_A_M45PE20:
9998                 case FLASH_5761VENDOR_ST_A_M45PE40:
9999                 case FLASH_5761VENDOR_ST_A_M45PE80:
10000                 case FLASH_5761VENDOR_ST_A_M45PE16:
10001                 case FLASH_5761VENDOR_ST_M_M45PE20:
10002                 case FLASH_5761VENDOR_ST_M_M45PE40:
10003                 case FLASH_5761VENDOR_ST_M_M45PE80:
10004                 case FLASH_5761VENDOR_ST_M_M45PE16:
10005                         tp->nvram_jedecnum = JEDEC_ST;
10006                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10007                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10008                         tp->nvram_pagesize = 256;
10009                         break;
10010         }
10011
10012         if (protect) {
10013                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10014         } else {
10015                 switch (nvcfg1) {
10016                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10017                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10018                         case FLASH_5761VENDOR_ST_A_M45PE16:
10019                         case FLASH_5761VENDOR_ST_M_M45PE16:
10020                                 tp->nvram_size = 0x100000;
10021                                 break;
10022                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10023                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10024                         case FLASH_5761VENDOR_ST_A_M45PE80:
10025                         case FLASH_5761VENDOR_ST_M_M45PE80:
10026                                 tp->nvram_size = 0x80000;
10027                                 break;
10028                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10029                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10030                         case FLASH_5761VENDOR_ST_A_M45PE40:
10031                         case FLASH_5761VENDOR_ST_M_M45PE40:
10032                                 tp->nvram_size = 0x40000;
10033                                 break;
10034                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10035                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10036                         case FLASH_5761VENDOR_ST_A_M45PE20:
10037                         case FLASH_5761VENDOR_ST_M_M45PE20:
10038                                 tp->nvram_size = 0x20000;
10039                                 break;
10040                 }
10041         }
10042 }
10043
10044 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10045 {
10046         tp->nvram_jedecnum = JEDEC_ATMEL;
10047         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10048         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10049 }
10050
10051 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10052 static void __devinit tg3_nvram_init(struct tg3 *tp)
10053 {
10054         tw32_f(GRC_EEPROM_ADDR,
10055              (EEPROM_ADDR_FSM_RESET |
10056               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10057                EEPROM_ADDR_CLKPERD_SHIFT)));
10058
10059         msleep(1);
10060
10061         /* Enable seeprom accesses. */
10062         tw32_f(GRC_LOCAL_CTRL,
10063              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10064         udelay(100);
10065
10066         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10067             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10068                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10069
10070                 if (tg3_nvram_lock(tp)) {
10071                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10072                                "tg3_nvram_init failed.\n", tp->dev->name);
10073                         return;
10074                 }
10075                 tg3_enable_nvram_access(tp);
10076
10077                 tp->nvram_size = 0;
10078
10079                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10080                         tg3_get_5752_nvram_info(tp);
10081                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10082                         tg3_get_5755_nvram_info(tp);
10083                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10084                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10085                         tg3_get_5787_nvram_info(tp);
10086                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10087                         tg3_get_5761_nvram_info(tp);
10088                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10089                         tg3_get_5906_nvram_info(tp);
10090                 else
10091                         tg3_get_nvram_info(tp);
10092
10093                 if (tp->nvram_size == 0)
10094                         tg3_get_nvram_size(tp);
10095
10096                 tg3_disable_nvram_access(tp);
10097                 tg3_nvram_unlock(tp);
10098
10099         } else {
10100                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10101
10102                 tg3_get_eeprom_size(tp);
10103         }
10104 }
10105
/* Read one 32-bit word from the serial EEPROM via the GRC EEPROM
 * state machine (used on chips without an NVRAM interface).
 *
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 with the word in *val, -EINVAL for a bad offset, or
 * -EBUSY if the read does not complete within the polling window
 * (1000 x 1ms sleeps).
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device id, and the
	 * read flag before programming the new command.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10139
10140 #define NVRAM_CMD_TIMEOUT 10000
10141
10142 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10143 {
10144         int i;
10145
10146         tw32(NVRAM_CMD, nvram_cmd);
10147         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10148                 udelay(10);
10149                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10150                         udelay(10);
10151                         break;
10152                 }
10153         }
10154         if (i == NVRAM_CMD_TIMEOUT) {
10155                 return -EBUSY;
10156         }
10157         return 0;
10158 }
10159
10160 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10161 {
10162         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10163             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10164             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10165            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10166             (tp->nvram_jedecnum == JEDEC_ATMEL))
10167
10168                 addr = ((addr / tp->nvram_pagesize) <<
10169                         ATMEL_AT45DB0X1B_PAGE_POS) +
10170                        (addr % tp->nvram_pagesize);
10171
10172         return addr;
10173 }
10174
10175 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10176 {
10177         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10178             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10179             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10180            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10181             (tp->nvram_jedecnum == JEDEC_ATMEL))
10182
10183                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10184                         tp->nvram_pagesize) +
10185                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10186
10187         return addr;
10188 }
10189
/* Read one 32-bit word from NVRAM at logical byte @offset.  Falls
 * back to the serial-EEPROM path on chips without an NVRAM interface.
 * Takes the NVRAM hardware lock and enables access around the command.
 *
 * Returns 0 with the byte-swapped word in *val, or a negative errno
 * (*val untouched on failure).
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to flash page addressing where required. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10221
10222 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10223 {
10224         u32 v;
10225         int res = tg3_nvram_read(tp, offset, &v);
10226         if (!res)
10227                 *val = cpu_to_le32(v);
10228         return res;
10229 }
10230
10231 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10232 {
10233         int err;
10234         u32 tmp;
10235
10236         err = tg3_nvram_read(tp, offset, &tmp);
10237         *val = swab32(tmp);
10238         return err;
10239 }
10240
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time via the GRC EEPROM state machine.  @buf is
 * interpreted as little-endian words; offset/len are expected to be
 * dword aligned by the caller (see tg3_nvram_write_block).
 *
 * Returns 0 on success or -EBUSY if a word write does not complete
 * within the polling window (1000 x 1ms sleeps).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* Write back the COMPLETE bit to clear any stale
		 * completion status before starting this word.
		 */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion of this word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10283
/* offset and length are dword aligned.
 *
 * Write path for raw (unbuffered) flash parts: each affected page is
 * read into a scratch buffer, the caller's bytes are merged in, the
 * page is erased, and the whole page is programmed back one dword at a
 * time.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page for the
	 * read-modify-write cycle. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the entire page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data over the page image; @size is
		 * clamped so a short tail write still works. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the page back one dword at a time, bracketing
		 * the burst with FIRST/LAST markers. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error paths. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10380
/* offset and length are dword aligned.
 *
 * Write path for buffered flash / EEPROM parts: stream the data one
 * dword at a time, tagging page boundaries with FIRST/LAST command
 * bits.  Returns 0 on success or a negative errno from the first
 * failed NVRAM command.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte offset of this dword within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First dword of a page (or of the whole transfer)
		 * starts a new buffered page write ... */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ... and the final dword of the transfer always closes
		 * the burst, even mid-page. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-manufactured parts (on chips other than those
		 * listed) need an explicit write-enable before each
		 * page write is started. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10434
10435 /* offset and length are dword aligned */
10436 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10437 {
10438         int ret;
10439
10440         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10441                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10442                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10443                 udelay(40);
10444         }
10445
10446         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10447                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10448         }
10449         else {
10450                 u32 grc_mode;
10451
10452                 ret = tg3_nvram_lock(tp);
10453                 if (ret)
10454                         return ret;
10455
10456                 tg3_enable_nvram_access(tp);
10457                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10458                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10459                         tw32(NVRAM_WRITE1, 0x406);
10460
10461                 grc_mode = tr32(GRC_MODE);
10462                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10463
10464                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10465                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10466
10467                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10468                                 buf);
10469                 }
10470                 else {
10471                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10472                                 buf);
10473                 }
10474
10475                 grc_mode = tr32(GRC_MODE);
10476                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10477
10478                 tg3_disable_nvram_access(tp);
10479                 tg3_nvram_unlock(tp);
10480         }
10481
10482         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10483                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10484                 udelay(40);
10485         }
10486
10487         return ret;
10488 }
10489
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID found on
 * that board; used as a fallback when NVRAM supplies no PHY ID.
 * A phy_id of 0 in the table leads tg3_phy_probe() to flag the board
 * as serdes (fiber).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10494
/* Hard-coded subsystem-ID -> PHY ID table, consulted by
 * lookup_by_subsys() when the EEPROM carries no valid signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10532
10533 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10534 {
10535         int i;
10536
10537         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10538                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10539                      tp->pdev->subsystem_vendor) &&
10540                     (subsys_id_to_phy_id[i].subsys_devid ==
10541                      tp->pdev->subsystem_device))
10542                         return &subsys_id_to_phy_id[i];
10543         }
10544         return NULL;
10545 }
10546
10547 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10548 {
10549         u32 val;
10550         u16 pmcsr;
10551
10552         /* On some early chips the SRAM cannot be accessed in D3hot state,
10553          * so need make sure we're in D0.
10554          */
10555         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10556         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10557         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10558         msleep(1);
10559
10560         /* Make sure register accesses (indirect or otherwise)
10561          * will function correctly.
10562          */
10563         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10564                                tp->misc_host_ctrl);
10565
10566         /* The memory arbiter has to be enabled in order for SRAM accesses
10567          * to succeed.  Normally on powerup the tg3 chip firmware will make
10568          * sure it is enabled, but other entities such as system netboot
10569          * code might disable it.
10570          */
10571         val = tr32(MEMARB_MODE);
10572         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10573
10574         tp->phy_id = PHY_ID_INVALID;
10575         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10576
10577         /* Assume an onboard device and WOL capable by default.  */
10578         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10579
10580         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10581                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10582                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10583                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10584                 }
10585                 val = tr32(VCPU_CFGSHDW);
10586                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10587                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10588                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10589                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10590                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10591                 return;
10592         }
10593
10594         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10595         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10596                 u32 nic_cfg, led_cfg;
10597                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10598                 int eeprom_phy_serdes = 0;
10599
10600                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10601                 tp->nic_sram_data_cfg = nic_cfg;
10602
10603                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10604                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10605                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10606                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10607                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10608                     (ver > 0) && (ver < 0x100))
10609                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10610
10611                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10612                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10613                         eeprom_phy_serdes = 1;
10614
10615                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10616                 if (nic_phy_id != 0) {
10617                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10618                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10619
10620                         eeprom_phy_id  = (id1 >> 16) << 10;
10621                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10622                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10623                 } else
10624                         eeprom_phy_id = 0;
10625
10626                 tp->phy_id = eeprom_phy_id;
10627                 if (eeprom_phy_serdes) {
10628                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10629                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10630                         else
10631                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10632                 }
10633
10634                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10635                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10636                                     SHASTA_EXT_LED_MODE_MASK);
10637                 else
10638                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10639
10640                 switch (led_cfg) {
10641                 default:
10642                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10643                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10644                         break;
10645
10646                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10647                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10648                         break;
10649
10650                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10651                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10652
10653                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10654                          * read on some older 5700/5701 bootcode.
10655                          */
10656                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10657                             ASIC_REV_5700 ||
10658                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10659                             ASIC_REV_5701)
10660                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10661
10662                         break;
10663
10664                 case SHASTA_EXT_LED_SHARED:
10665                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10666                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10667                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10668                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10669                                                  LED_CTRL_MODE_PHY_2);
10670                         break;
10671
10672                 case SHASTA_EXT_LED_MAC:
10673                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10674                         break;
10675
10676                 case SHASTA_EXT_LED_COMBO:
10677                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10678                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10679                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10680                                                  LED_CTRL_MODE_PHY_2);
10681                         break;
10682
10683                 };
10684
10685                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10686                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10687                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10688                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10689
10690                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
10691                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
10692                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10693
10694                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10695                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10696                         if ((tp->pdev->subsystem_vendor ==
10697                              PCI_VENDOR_ID_ARIMA) &&
10698                             (tp->pdev->subsystem_device == 0x205a ||
10699                              tp->pdev->subsystem_device == 0x2063))
10700                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10701                 } else {
10702                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10703                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10704                 }
10705
10706                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10707                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10708                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10709                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10710                 }
10711                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10712                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10713                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10714                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10715                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10716
10717                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10718                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10719                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10720
10721                 if (cfg2 & (1 << 17))
10722                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10723
10724                 /* serdes signal pre-emphasis in register 0x590 set by */
10725                 /* bootcode if bit 18 is set */
10726                 if (cfg2 & (1 << 18))
10727                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10728
10729                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10730                         u32 cfg3;
10731
10732                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10733                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10734                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10735                 }
10736         }
10737 }
10738
/* Determine the PHY attached to this device.  Tries, in order: the
 * live MII ID registers (unless ASF/APE firmware owns the PHY), the
 * PHY ID cached from EEPROM by tg3_get_eeprom_hw_cfg(), and finally
 * the hard-coded subsystem-ID table.  For copper PHYs it also makes
 * sure full autonegotiation advertising is set up.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack the two MII ID registers into the driver's
		 * internal PHY_ID layout. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			/* A zero phy_id in the table marks a serdes board. */
			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR latches link-down; read twice so the second read
		 * reflects current link state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): these advertise writes repeat the ones in
		 * the conditional block just above, now unconditionally —
		 * looks intentional (ensure registers are set even when
		 * already advertising all); confirm before changing. */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this re-runs the 5401 DSP init performed just
	 * above when the first attempt succeeded — looks redundant;
	 * confirm against hardware errata before removing. */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10866
/* Extract the board part number from the Vital Product Data (VPD)
 * image — read either from NVRAM (offset 0x100, when the EEPROM magic
 * is valid) or through the PCI VPD capability — and store it in
 * tp->board_part_number.  Falls back to "BCM95906"/"none" when no
 * part number can be found.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Valid NVRAM image: VPD lives at fixed offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			/* Unpack the dword into bytes, LSB first. */
			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM signature: read VPD through the PCI VPD
		 * capability registers instead. */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Per the PCI VPD spec, bit 15 of the address
			 * register flips to 1 when read data is ready. */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 (identifier string) and 0x91 (VPD-W) are large
		 * resource tags with a 16-bit LE length — skip them. */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a VPD-R (0x90) tag can hold the part number. */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the VPD-R keywords looking for "PN". */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			/* Skip keyword header (3 bytes) plus data. */
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10967
10968 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10969 {
10970         u32 val;
10971
10972         if (tg3_nvram_read_swab(tp, offset, &val) ||
10973             (val & 0xfc000000) != 0x0c000000 ||
10974             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10975             val != 0)
10976                 return 0;
10977
10978         return 1;
10979 }
10980
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver, and — when the device is managed by ASF firmware
 * (and not by the APE) — append the ASF version as ", <ver>".
 * Silently returns on any NVRAM read failure or layout mismatch,
 * leaving tp->fw_ver with whatever was filled in so far.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Only NVRAMs carrying the tg3 magic at word 0 have this layout. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: bootcode image offset; word 0x4: its load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word image+8 holds the version string's load address. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is a load address; rebase it against the image's
	 * load address (start) to get its NVRAM offset.
	 */
	offset = offset + ver_offset - start;
	/* Copy up to 16 bytes of version string, one LE word at a time. */
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Append the ASF version only when ASF manages the NIC and the
	 * APE does not.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory for the ASF INIT entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Loop ran off the end of the directory: no ASF entry present. */
	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;	/* fixed load address on pre-5705 parts */
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* dirent+4: ASF image offset; image+8: version load address. */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " then the ASF version after the bootcode string. */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Final word may not fit: copy only what remains of
		 * fw_ver[] and stop.
		 */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how the copy ended. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11064
11065 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11066
11067 static int __devinit tg3_get_invariants(struct tg3 *tp)
11068 {
11069         static struct pci_device_id write_reorder_chipsets[] = {
11070                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11071                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11072                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11073                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11074                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11075                              PCI_DEVICE_ID_VIA_8385_0) },
11076                 { },
11077         };
11078         u32 misc_ctrl_reg;
11079         u32 cacheline_sz_reg;
11080         u32 pci_state_reg, grc_misc_cfg;
11081         u32 val;
11082         u16 pci_cmd;
11083         int err, pcie_cap;
11084
11085         /* Force memory write invalidate off.  If we leave it on,
11086          * then on 5700_BX chips we have to enable a workaround.
11087          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11088          * to match the cacheline size.  The Broadcom driver have this
11089          * workaround but turns MWI off all the times so never uses
11090          * it.  This seems to suggest that the workaround is insufficient.
11091          */
11092         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11093         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11094         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11095
11096         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11097          * has the register indirect write enable bit set before
11098          * we try to access any of the MMIO registers.  It is also
11099          * critical that the PCI-X hw workaround situation is decided
11100          * before that as well.
11101          */
11102         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11103                               &misc_ctrl_reg);
11104
11105         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11106                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11108                 u32 prod_id_asic_rev;
11109
11110                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11111                                       &prod_id_asic_rev);
11112                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11113         }
11114
11115         /* Wrong chip ID in 5752 A0. This code can be removed later
11116          * as A0 is not in production.
11117          */
11118         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11119                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11120
11121         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11122          * we need to disable memory and use config. cycles
11123          * only to access all registers. The 5702/03 chips
11124          * can mistakenly decode the special cycles from the
11125          * ICH chipsets as memory write cycles, causing corruption
11126          * of register and memory space. Only certain ICH bridges
11127          * will drive special cycles with non-zero data during the
11128          * address phase which can fall within the 5703's address
11129          * range. This is not an ICH bug as the PCI spec allows
11130          * non-zero address during special cycles. However, only
11131          * these ICH bridges are known to drive non-zero addresses
11132          * during special cycles.
11133          *
11134          * Since special cycles do not cross PCI bridges, we only
11135          * enable this workaround if the 5703 is on the secondary
11136          * bus of these ICH bridges.
11137          */
11138         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11139             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11140                 static struct tg3_dev_id {
11141                         u32     vendor;
11142                         u32     device;
11143                         u32     rev;
11144                 } ich_chipsets[] = {
11145                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11146                           PCI_ANY_ID },
11147                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11148                           PCI_ANY_ID },
11149                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11150                           0xa },
11151                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11152                           PCI_ANY_ID },
11153                         { },
11154                 };
11155                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11156                 struct pci_dev *bridge = NULL;
11157
11158                 while (pci_id->vendor != 0) {
11159                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11160                                                 bridge);
11161                         if (!bridge) {
11162                                 pci_id++;
11163                                 continue;
11164                         }
11165                         if (pci_id->rev != PCI_ANY_ID) {
11166                                 if (bridge->revision > pci_id->rev)
11167                                         continue;
11168                         }
11169                         if (bridge->subordinate &&
11170                             (bridge->subordinate->number ==
11171                              tp->pdev->bus->number)) {
11172
11173                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11174                                 pci_dev_put(bridge);
11175                                 break;
11176                         }
11177                 }
11178         }
11179
11180         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11181          * DMA addresses > 40-bit. This bridge may have other additional
11182          * 57xx devices behind it in some 4-port NIC designs for example.
11183          * Any tg3 device found behind the bridge will also need the 40-bit
11184          * DMA workaround.
11185          */
11186         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11188                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11189                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11190                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11191         }
11192         else {
11193                 struct pci_dev *bridge = NULL;
11194
11195                 do {
11196                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11197                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11198                                                 bridge);
11199                         if (bridge && bridge->subordinate &&
11200                             (bridge->subordinate->number <=
11201                              tp->pdev->bus->number) &&
11202                             (bridge->subordinate->subordinate >=
11203                              tp->pdev->bus->number)) {
11204                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11205                                 pci_dev_put(bridge);
11206                                 break;
11207                         }
11208                 } while (bridge);
11209         }
11210
11211         /* Initialize misc host control in PCI block. */
11212         tp->misc_host_ctrl |= (misc_ctrl_reg &
11213                                MISC_HOST_CTRL_CHIPREV);
11214         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11215                                tp->misc_host_ctrl);
11216
11217         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11218                               &cacheline_sz_reg);
11219
11220         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11221         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11222         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11223         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11224
11225         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11226             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11227                 tp->pdev_peer = tg3_find_peer(tp);
11228
11229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11230             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11232             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11235             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11236             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11237                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11238
11239         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11240             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11241                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11242
11243         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11244                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11245                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11246                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11247                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11248                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11249                      tp->pdev_peer == tp->pdev))
11250                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11251
11252                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11253                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11254                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11255                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11256                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11257                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11258                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11259                 } else {
11260                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11261                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11262                                 ASIC_REV_5750 &&
11263                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11264                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11265                 }
11266         }
11267
11268         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11269             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11270             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11271             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11272             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11273             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11274             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11275             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11276                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11277
11278         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11279         if (pcie_cap != 0) {
11280                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11281
11282                 pcie_set_readrq(tp->pdev, 4096);
11283
11284                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11285                         u16 lnkctl;
11286
11287                         pci_read_config_word(tp->pdev,
11288                                              pcie_cap + PCI_EXP_LNKCTL,
11289                                              &lnkctl);
11290                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11291                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11292                 }
11293         }
11294
11295         /* If we have an AMD 762 or VIA K8T800 chipset, write
11296          * reordering to the mailbox registers done by the host
11297          * controller can cause major troubles.  We read back from
11298          * every mailbox register write to force the writes to be
11299          * posted to the chip in order.
11300          */
11301         if (pci_dev_present(write_reorder_chipsets) &&
11302             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11303                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11304
11305         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11306             tp->pci_lat_timer < 64) {
11307                 tp->pci_lat_timer = 64;
11308
11309                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11310                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11311                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11312                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11313
11314                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11315                                        cacheline_sz_reg);
11316         }
11317
11318         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11319             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11320                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11321                 if (!tp->pcix_cap) {
11322                         printk(KERN_ERR PFX "Cannot find PCI-X "
11323                                             "capability, aborting.\n");
11324                         return -EIO;
11325                 }
11326         }
11327
11328         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11329                               &pci_state_reg);
11330
11331         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11332                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11333
11334                 /* If this is a 5700 BX chipset, and we are in PCI-X
11335                  * mode, enable register write workaround.
11336                  *
11337                  * The workaround is to use indirect register accesses
11338                  * for all chip writes not to mailbox registers.
11339                  */
11340                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11341                         u32 pm_reg;
11342
11343                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11344
11345                         /* The chip can have it's power management PCI config
11346                          * space registers clobbered due to this bug.
11347                          * So explicitly force the chip into D0 here.
11348                          */
11349                         pci_read_config_dword(tp->pdev,
11350                                               tp->pm_cap + PCI_PM_CTRL,
11351                                               &pm_reg);
11352                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11353                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11354                         pci_write_config_dword(tp->pdev,
11355                                                tp->pm_cap + PCI_PM_CTRL,
11356                                                pm_reg);
11357
11358                         /* Also, force SERR#/PERR# in PCI command. */
11359                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11360                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11361                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11362                 }
11363         }
11364
11365         /* 5700 BX chips need to have their TX producer index mailboxes
11366          * written twice to workaround a bug.
11367          */
11368         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11369                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11370
11371         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11372                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11373         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11374                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11375
11376         /* Chip-specific fixup from Broadcom driver */
11377         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11378             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11379                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11380                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11381         }
11382
11383         /* Default fast path register access methods */
11384         tp->read32 = tg3_read32;
11385         tp->write32 = tg3_write32;
11386         tp->read32_mbox = tg3_read32;
11387         tp->write32_mbox = tg3_write32;
11388         tp->write32_tx_mbox = tg3_write32;
11389         tp->write32_rx_mbox = tg3_write32;
11390
11391         /* Various workaround register access methods */
11392         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11393                 tp->write32 = tg3_write_indirect_reg32;
11394         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11395                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11396                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11397                 /*
11398                  * Back to back register writes can cause problems on these
11399                  * chips, the workaround is to read back all reg writes
11400                  * except those to mailbox regs.
11401                  *
11402                  * See tg3_write_indirect_reg32().
11403                  */
11404                 tp->write32 = tg3_write_flush_reg32;
11405         }
11406
11407
11408         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11409             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11410                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11411                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11412                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11413         }
11414
11415         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11416                 tp->read32 = tg3_read_indirect_reg32;
11417                 tp->write32 = tg3_write_indirect_reg32;
11418                 tp->read32_mbox = tg3_read_indirect_mbox;
11419                 tp->write32_mbox = tg3_write_indirect_mbox;
11420                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11421                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11422
11423                 iounmap(tp->regs);
11424                 tp->regs = NULL;
11425
11426                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11427                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11428                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11429         }
11430         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11431                 tp->read32_mbox = tg3_read32_mbox_5906;
11432                 tp->write32_mbox = tg3_write32_mbox_5906;
11433                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11434                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11435         }
11436
11437         if (tp->write32 == tg3_write_indirect_reg32 ||
11438             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11439              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11440               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11441                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11442
11443         /* Get eeprom hw config before calling tg3_set_power_state().
11444          * In particular, the TG3_FLG2_IS_NIC flag must be
11445          * determined before calling tg3_set_power_state() so that
11446          * we know whether or not to switch out of Vaux power.
11447          * When the flag is set, it means that GPIO1 is used for eeprom
11448          * write protect and also implies that it is a LOM where GPIOs
11449          * are not used to switch power.
11450          */
11451         tg3_get_eeprom_hw_cfg(tp);
11452
11453         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11454                 /* Allow reads and writes to the
11455                  * APE register and memory space.
11456                  */
11457                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11458                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11459                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11460                                        pci_state_reg);
11461         }
11462
11463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11465                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11466
11467                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11468                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11469                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11470                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11471                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11472         }
11473
11474         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11475          * GPIO1 driven high will bring 5700's external PHY out of reset.
11476          * It is also used as eeprom write protect on LOMs.
11477          */
11478         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11479         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11480             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11481                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11482                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11483         /* Unused GPIO3 must be driven as output on 5752 because there
11484          * are no pull-up resistors on unused GPIO pins.
11485          */
11486         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11487                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11488
11489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11490                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11491
11492         /* Force the chip into D0. */
11493         err = tg3_set_power_state(tp, PCI_D0);
11494         if (err) {
11495                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11496                        pci_name(tp->pdev));
11497                 return err;
11498         }
11499
11500         /* 5700 B0 chips do not support checksumming correctly due
11501          * to hardware bugs.
11502          */
11503         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11504                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11505
11506         /* Derive initial jumbo mode from MTU assigned in
11507          * ether_setup() via the alloc_etherdev() call
11508          */
11509         if (tp->dev->mtu > ETH_DATA_LEN &&
11510             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11511                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11512
11513         /* Determine WakeOnLan speed to use. */
11514         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11515             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11516             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11517             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11518                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11519         } else {
11520                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11521         }
11522
11523         /* A few boards don't want Ethernet@WireSpeed phy feature */
11524         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11525             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11526              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11527              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11528             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11529             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11530                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11531
11532         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11533             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11534                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11535         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11536                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11537
11538         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11539                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11540                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11541                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11542                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11543                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11544                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11545                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11546                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11547                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11548                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11549                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11550         }
11551
11552         tp->coalesce_mode = 0;
11553         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11554             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11555                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11556
11557         /* Initialize MAC MI mode, polling disabled. */
11558         tw32_f(MAC_MI_MODE, tp->mi_mode);
11559         udelay(80);
11560
11561         /* Initialize data/descriptor byte/word swapping. */
11562         val = tr32(GRC_MODE);
11563         val &= GRC_MODE_HOST_STACKUP;
11564         tw32(GRC_MODE, val | tp->grc_mode);
11565
11566         tg3_switch_clocks(tp);
11567
11568         /* Clear this out for sanity. */
11569         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11570
11571         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11572                               &pci_state_reg);
11573         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11574             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11575                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11576
11577                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11578                     chiprevid == CHIPREV_ID_5701_B0 ||
11579                     chiprevid == CHIPREV_ID_5701_B2 ||
11580                     chiprevid == CHIPREV_ID_5701_B5) {
11581                         void __iomem *sram_base;
11582
11583                         /* Write some dummy words into the SRAM status block
11584                          * area, see if it reads back correctly.  If the return
11585                          * value is bad, force enable the PCIX workaround.
11586                          */
11587                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11588
11589                         writel(0x00000000, sram_base);
11590                         writel(0x00000000, sram_base + 4);
11591                         writel(0xffffffff, sram_base + 4);
11592                         if (readl(sram_base) != 0x00000000)
11593                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11594                 }
11595         }
11596
11597         udelay(50);
11598         tg3_nvram_init(tp);
11599
11600         grc_misc_cfg = tr32(GRC_MISC_CFG);
11601         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11602
11603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11604             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11605              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11606                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11607
11608         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11609             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11610                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11611         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11612                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11613                                       HOSTCC_MODE_CLRTICK_TXBD);
11614
11615                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11616                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11617                                        tp->misc_host_ctrl);
11618         }
11619
11620         /* these are limited to 10/100 only */
11621         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11622              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11623             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11624              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11625              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11626               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11627               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11628             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11629              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11630               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11631               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11632             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11633                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11634
11635         err = tg3_phy_probe(tp);
11636         if (err) {
11637                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11638                        pci_name(tp->pdev), err);
11639                 /* ... but do not return immediately ... */
11640         }
11641
11642         tg3_read_partno(tp);
11643         tg3_read_fw_ver(tp);
11644
11645         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11646                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11647         } else {
11648                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11649                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11650                 else
11651                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11652         }
11653
11654         /* 5700 {AX,BX} chips have a broken status block link
11655          * change bit implementation, so we must use the
11656          * status register in those cases.
11657          */
11658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11659                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11660         else
11661                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11662
11663         /* The led_ctrl is set during tg3_phy_probe, here we might
11664          * have to force the link status polling mechanism based
11665          * upon subsystem IDs.
11666          */
11667         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11668             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11669             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11670                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11671                                   TG3_FLAG_USE_LINKCHG_REG);
11672         }
11673
11674         /* For all SERDES we poll the MAC status register. */
11675         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11676                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11677         else
11678                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11679
11680         /* All chips before 5787 can get confused if TX buffers
11681          * straddle the 4GB address boundary in some cases.
11682          */
11683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11684             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11687             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11688                 tp->dev->hard_start_xmit = tg3_start_xmit;
11689         else
11690                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11691
11692         tp->rx_offset = 2;
11693         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11694             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11695                 tp->rx_offset = 0;
11696
11697         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11698
11699         /* Increment the rx prod index on the rx std ring by at most
11700          * 8 for these chips to workaround hw errata.
11701          */
11702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11705                 tp->rx_std_max_post = 8;
11706
11707         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11708                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11709                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11710
11711         return err;
11712 }
11713
11714 #ifdef CONFIG_SPARC
11715 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11716 {
11717         struct net_device *dev = tp->dev;
11718         struct pci_dev *pdev = tp->pdev;
11719         struct device_node *dp = pci_device_to_OF_node(pdev);
11720         const unsigned char *addr;
11721         int len;
11722
11723         addr = of_get_property(dp, "local-mac-address", &len);
11724         if (addr && len == 6) {
11725                 memcpy(dev->dev_addr, addr, 6);
11726                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11727                 return 0;
11728         }
11729         return -ENODEV;
11730 }
11731
11732 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11733 {
11734         struct net_device *dev = tp->dev;
11735
11736         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11737         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11738         return 0;
11739 }
11740 #endif
11741
11742 static int __devinit tg3_get_device_address(struct tg3 *tp)
11743 {
11744         struct net_device *dev = tp->dev;
11745         u32 hi, lo, mac_offset;
11746         int addr_ok = 0;
11747
11748 #ifdef CONFIG_SPARC
11749         if (!tg3_get_macaddr_sparc(tp))
11750                 return 0;
11751 #endif
11752
11753         mac_offset = 0x7c;
11754         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11755             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11756                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11757                         mac_offset = 0xcc;
11758                 if (tg3_nvram_lock(tp))
11759                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11760                 else
11761                         tg3_nvram_unlock(tp);
11762         }
11763         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11764                 mac_offset = 0x10;
11765
11766         /* First try to get it from MAC address mailbox. */
11767         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11768         if ((hi >> 16) == 0x484b) {
11769                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11770                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11771
11772                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11773                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11774                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11775                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11776                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11777
11778                 /* Some old bootcode may report a 0 MAC address in SRAM */
11779                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11780         }
11781         if (!addr_ok) {
11782                 /* Next, try NVRAM. */
11783                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11784                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11785                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11786                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11787                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11788                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11789                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11790                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11791                 }
11792                 /* Finally just fetch it out of the MAC control regs. */
11793                 else {
11794                         hi = tr32(MAC_ADDR_0_HIGH);
11795                         lo = tr32(MAC_ADDR_0_LOW);
11796
11797                         dev->dev_addr[5] = lo & 0xff;
11798                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11799                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11800                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11801                         dev->dev_addr[1] = hi & 0xff;
11802                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11803                 }
11804         }
11805
11806         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11807 #ifdef CONFIG_SPARC64
11808                 if (!tg3_get_default_macaddr_sparc(tp))
11809                         return 0;
11810 #endif
11811                 return -EINVAL;
11812         }
11813         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11814         return 0;
11815 }
11816
11817 #define BOUNDARY_SINGLE_CACHELINE       1
11818 #define BOUNDARY_MULTI_CACHELINE        2
11819
11820 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11821 {
11822         int cacheline_size;
11823         u8 byte;
11824         int goal;
11825
11826         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11827         if (byte == 0)
11828                 cacheline_size = 1024;
11829         else
11830                 cacheline_size = (int) byte * 4;
11831
11832         /* On 5703 and later chips, the boundary bits have no
11833          * effect.
11834          */
11835         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11836             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11837             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11838                 goto out;
11839
11840 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11841         goal = BOUNDARY_MULTI_CACHELINE;
11842 #else
11843 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11844         goal = BOUNDARY_SINGLE_CACHELINE;
11845 #else
11846         goal = 0;
11847 #endif
11848 #endif
11849
11850         if (!goal)
11851                 goto out;
11852
11853         /* PCI controllers on most RISC systems tend to disconnect
11854          * when a device tries to burst across a cache-line boundary.
11855          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11856          *
11857          * Unfortunately, for PCI-E there are only limited
11858          * write-side controls for this, and thus for reads
11859          * we will still get the disconnects.  We'll also waste
11860          * these PCI cycles for both read and write for chips
11861          * other than 5700 and 5701 which do not implement the
11862          * boundary bits.
11863          */
11864         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11865             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11866                 switch (cacheline_size) {
11867                 case 16:
11868                 case 32:
11869                 case 64:
11870                 case 128:
11871                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11872                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11873                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11874                         } else {
11875                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11876                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11877                         }
11878                         break;
11879
11880                 case 256:
11881                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11882                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11883                         break;
11884
11885                 default:
11886                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11887                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11888                         break;
11889                 };
11890         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11891                 switch (cacheline_size) {
11892                 case 16:
11893                 case 32:
11894                 case 64:
11895                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11896                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11897                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11898                                 break;
11899                         }
11900                         /* fallthrough */
11901                 case 128:
11902                 default:
11903                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11904                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11905                         break;
11906                 };
11907         } else {
11908                 switch (cacheline_size) {
11909                 case 16:
11910                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11911                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11912                                         DMA_RWCTRL_WRITE_BNDRY_16);
11913                                 break;
11914                         }
11915                         /* fallthrough */
11916                 case 32:
11917                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11918                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11919                                         DMA_RWCTRL_WRITE_BNDRY_32);
11920                                 break;
11921                         }
11922                         /* fallthrough */
11923                 case 64:
11924                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11925                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11926                                         DMA_RWCTRL_WRITE_BNDRY_64);
11927                                 break;
11928                         }
11929                         /* fallthrough */
11930                 case 128:
11931                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11932                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11933                                         DMA_RWCTRL_WRITE_BNDRY_128);
11934                                 break;
11935                         }
11936                         /* fallthrough */
11937                 case 256:
11938                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11939                                 DMA_RWCTRL_WRITE_BNDRY_256);
11940                         break;
11941                 case 512:
11942                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11943                                 DMA_RWCTRL_WRITE_BNDRY_512);
11944                         break;
11945                 case 1024:
11946                 default:
11947                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11948                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11949                         break;
11950                 };
11951         }
11952
11953 out:
11954         return val;
11955 }
11956
/* Run one DMA transfer of 'size' bytes between the host buffer
 * (buf / buf_dma) and NIC on-chip memory, using a test descriptor
 * written into the NIC's SRAM DMA descriptor pool.  'to_device'
 * non-zero drives the read-DMA engine (host -> NIC), zero drives
 * the write-DMA engine (NIC -> host).  Returns 0 if the completion
 * appears in the corresponding FTQ within ~4ms, -ENODEV otherwise.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA engine status, and quiesce
	 * the buffer manager / FTQ state before the test.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the test descriptor: host DMA address, an on-chip
	 * mbuf location (0x2100) and the transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid/sqid pair for the read-DMA path. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* cqid/sqid pair for the write-DMA path. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM via the
	 * PCI memory window config registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll up to 40 * 100us for the completion FIFO to echo the
	 * descriptor address back.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12037
12038 #define TEST_BUFFER_SIZE        0x2000
12039
/* Configure the DMA read/write control register for this chip and,
 * on 5700/5701, run a loopback DMA test to detect the write-DMA
 * boundary bug, tightening the write boundary to 16 bytes if the
 * test shows corruption.  Returns 0 on success, -ENOMEM if the test
 * buffer cannot be allocated, or -ENODEV if DMA is irreparably
 * broken.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes, then the per-bus-type
	 * boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip/bus specific watermark settings (magic values are
	 * Broadcom-recommended register fields for each family).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (write command bits
	 * repurposed on these chips).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the write-DMA boundary test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Write a known pattern to the chip and read it back; on
	 * corruption, retry once with the 16-byte write boundary.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: clamp the write
				 * boundary to 16 bytes and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12226
12227 static void __devinit tg3_init_link_config(struct tg3 *tp)
12228 {
12229         tp->link_config.advertising =
12230                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12231                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12232                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12233                  ADVERTISED_Autoneg | ADVERTISED_MII);
12234         tp->link_config.speed = SPEED_INVALID;
12235         tp->link_config.duplex = DUPLEX_INVALID;
12236         tp->link_config.autoneg = AUTONEG_ENABLE;
12237         tp->link_config.active_speed = SPEED_INVALID;
12238         tp->link_config.active_duplex = DUPLEX_INVALID;
12239         tp->link_config.phy_is_low_power = 0;
12240         tp->link_config.orig_speed = SPEED_INVALID;
12241         tp->link_config.orig_duplex = DUPLEX_INVALID;
12242         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12243 }
12244
12245 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12246 {
12247         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12248                 tp->bufmgr_config.mbuf_read_dma_low_water =
12249                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12250                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12251                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12252                 tp->bufmgr_config.mbuf_high_water =
12253                         DEFAULT_MB_HIGH_WATER_5705;
12254                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12255                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12256                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12257                         tp->bufmgr_config.mbuf_high_water =
12258                                 DEFAULT_MB_HIGH_WATER_5906;
12259                 }
12260
12261                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12262                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12263                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12264                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12265                 tp->bufmgr_config.mbuf_high_water_jumbo =
12266                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12267         } else {
12268                 tp->bufmgr_config.mbuf_read_dma_low_water =
12269                         DEFAULT_MB_RDMA_LOW_WATER;
12270                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12271                         DEFAULT_MB_MACRX_LOW_WATER;
12272                 tp->bufmgr_config.mbuf_high_water =
12273                         DEFAULT_MB_HIGH_WATER;
12274
12275                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12276                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12277                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12278                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12279                 tp->bufmgr_config.mbuf_high_water_jumbo =
12280                         DEFAULT_MB_HIGH_WATER_JUMBO;
12281         }
12282
12283         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12284         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12285 }
12286
12287 static char * __devinit tg3_phy_string(struct tg3 *tp)
12288 {
12289         switch (tp->phy_id & PHY_ID_MASK) {
12290         case PHY_ID_BCM5400:    return "5400";
12291         case PHY_ID_BCM5401:    return "5401";
12292         case PHY_ID_BCM5411:    return "5411";
12293         case PHY_ID_BCM5701:    return "5701";
12294         case PHY_ID_BCM5703:    return "5703";
12295         case PHY_ID_BCM5704:    return "5704";
12296         case PHY_ID_BCM5705:    return "5705";
12297         case PHY_ID_BCM5750:    return "5750";
12298         case PHY_ID_BCM5752:    return "5752";
12299         case PHY_ID_BCM5714:    return "5714";
12300         case PHY_ID_BCM5780:    return "5780";
12301         case PHY_ID_BCM5755:    return "5755";
12302         case PHY_ID_BCM5787:    return "5787";
12303         case PHY_ID_BCM5784:    return "5784";
12304         case PHY_ID_BCM5756:    return "5722/5756";
12305         case PHY_ID_BCM5906:    return "5906";
12306         case PHY_ID_BCM5761:    return "5761";
12307         case PHY_ID_BCM8002:    return "8002/serdes";
12308         case 0:                 return "serdes";
12309         default:                return "unknown";
12310         };
12311 }
12312
12313 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12314 {
12315         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12316                 strcpy(str, "PCI Express");
12317                 return str;
12318         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12319                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12320
12321                 strcpy(str, "PCIX:");
12322
12323                 if ((clock_ctrl == 7) ||
12324                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12325                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12326                         strcat(str, "133MHz");
12327                 else if (clock_ctrl == 0)
12328                         strcat(str, "33MHz");
12329                 else if (clock_ctrl == 2)
12330                         strcat(str, "50MHz");
12331                 else if (clock_ctrl == 4)
12332                         strcat(str, "66MHz");
12333                 else if (clock_ctrl == 6)
12334                         strcat(str, "100MHz");
12335         } else {
12336                 strcpy(str, "PCI:");
12337                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12338                         strcat(str, "66MHz");
12339                 else
12340                         strcat(str, "33MHz");
12341         }
12342         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12343                 strcat(str, ":32-bit");
12344         else
12345                 strcat(str, ":64-bit");
12346         return str;
12347 }
12348
12349 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12350 {
12351         struct pci_dev *peer;
12352         unsigned int func, devnr = tp->pdev->devfn & ~7;
12353
12354         for (func = 0; func < 8; func++) {
12355                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12356                 if (peer && peer != tp->pdev)
12357                         break;
12358                 pci_dev_put(peer);
12359         }
12360         /* 5704 can be configured in single-port mode, set peer to
12361          * tp->pdev in that case.
12362          */
12363         if (!peer) {
12364                 peer = tp->pdev;
12365                 return peer;
12366         }
12367
12368         /*
12369          * We don't need to keep the refcount elevated; there's no way
12370          * to remove one half of this device without removing the other
12371          */
12372         pci_dev_put(peer);
12373
12374         return peer;
12375 }
12376
12377 static void __devinit tg3_init_coal(struct tg3 *tp)
12378 {
12379         struct ethtool_coalesce *ec = &tp->coal;
12380
12381         memset(ec, 0, sizeof(*ec));
12382         ec->cmd = ETHTOOL_GCOALESCE;
12383         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12384         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12385         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12386         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12387         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12388         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12389         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12390         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12391         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12392
12393         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12394                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12395                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12396                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12397                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12398                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12399         }
12400
12401         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12402                 ec->rx_coalesce_usecs_irq = 0;
12403                 ec->tx_coalesce_usecs_irq = 0;
12404                 ec->stats_block_coalesce_usecs = 0;
12405         }
12406 }
12407
/* tg3_init_one() - PCI probe routine for Tigon3 devices.
 *
 * Enables the PCI device, maps BAR 0 (and BAR 2 for APE-enabled parts),
 * allocates the net_device, negotiates DMA masks, resets the chip if a
 * boot-time driver left DMA enabled, runs the DMA engine self-test and
 * finally registers the interface.  Returns 0 on success or a negative
 * errno; on failure every acquired resource is released through the
 * goto-based unwind at the bottom of the function.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Fill in default software state before touching the hardware. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit streaming mask if the wide mask failed. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO chips are always capable;
	 * 5700/5701/5705-A0/5906 and ASF-enabled parts are not; everything
	 * else uses firmware TSO with the known workaround flag set.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705-A1 on a slow bus without TSO: cap the rx ring at 64. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* APE-enabled parts expose a second register window in BAR 2. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		/* NOTE(review): "== 0UL" works but "!tp->aperegs" is the
		 * conventional null-pointer test.
		 */
		if (tp->aperegs == 0UL) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Success: announce the device and its capabilities. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12749
12750 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12751 {
12752         struct net_device *dev = pci_get_drvdata(pdev);
12753
12754         if (dev) {
12755                 struct tg3 *tp = netdev_priv(dev);
12756
12757                 flush_scheduled_work();
12758                 unregister_netdev(dev);
12759                 if (tp->aperegs) {
12760                         iounmap(tp->aperegs);
12761                         tp->aperegs = NULL;
12762                 }
12763                 if (tp->regs) {
12764                         iounmap(tp->regs);
12765                         tp->regs = NULL;
12766                 }
12767                 free_netdev(dev);
12768                 pci_release_regions(pdev);
12769                 pci_disable_device(pdev);
12770                 pci_set_drvdata(pdev, NULL);
12771         }
12772 }
12773
/* tg3_suspend() - legacy PM suspend hook.
 *
 * Saves PCI config state, quiesces the interface (stop NAPI/queues,
 * kill the timer, disable interrupts, halt the chip) and drops into
 * the requested power state.  If the power transition fails, the
 * device is restarted so the system keeps a working interface.
 * Returns 0 on success or the error from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Quiesce: finish deferred work, then stop the data path. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: try to bring the device back
		 * up so it remains usable.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12825
/* tg3_resume() - legacy PM resume hook.
 *
 * Restores PCI config state, returns the chip to D0, restarts the
 * hardware and re-arms the driver timer.  Returns 0 on success or a
 * negative error from the power-state change / hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	/* Nothing else to do if the interface was down at suspend time. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12860
/* PCI driver glue: probe/remove plus legacy power-management hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12869
/* Module load: register the PCI driver; tg3_init_one() then probes
 * each matching device.
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12874
/* Module unload: unregister the driver, detaching all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12879
/* Standard module entry/exit points. */
module_init(tg3_init);
module_exit(tg3_cleanup);