/* [TG3]: APE flag fix
 * [safe/jmp/linux-2.6] / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.85"
#define DRV_MODULE_RELDATE      "October 18, 2007"

/* Reset-time defaults for the MAC/RX/TX mode registers. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap when the tg3_debug module param is -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts
 * above and the descriptor layouts in tg3.h.
 */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: base frame size plus the per-chip rx_offset and 64
 * bytes of slack (presumably alignment headroom -- verify at ring init).
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
134
/* Driver version banner string. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Debug message bitmap; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* PCI vendor/device IDs this driver binds to; terminated by an empty
 * entry.  Exported to module tools via MODULE_DEVICE_TABLE below.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ethtool ETHTOOL_GSTATS label strings -- one per u64 counter in
 * struct tg3_ethtool_stats (the array is sized by TG3_NUM_STATS,
 * which is derived from that struct, so the orders must correspond).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
298
/* ethtool self-test names, TG3_NUM_TEST entries; the (online)/(offline)
 * suffix tells the user whether the test disrupts normal operation.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331 {
332         unsigned long flags;
333
334         spin_lock_irqsave(&tp->indirect_lock, flags);
335         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337         spin_unlock_irqrestore(&tp->indirect_lock, flags);
338 }
339
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         writel(val, tp->regs + off);
343         readl(tp->regs + off);
344 }
345
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
347 {
348         unsigned long flags;
349         u32 val;
350
351         spin_lock_irqsave(&tp->indirect_lock, flags);
352         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354         spin_unlock_irqrestore(&tp->indirect_lock, flags);
355         return val;
356 }
357
/* Write a mailbox register via PCI config space instead of MMIO.
 *
 * Two mailboxes have dedicated config-space aliases (the receive-return
 * consumer index and the standard ring producer index); all others go
 * through the REG_BASE_ADDR/REG_DATA indirect window, where mailboxes
 * sit at offset +0x5600, serialized by indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
387
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389 {
390         unsigned long flags;
391         u32 val;
392
393         spin_lock_irqsave(&tp->indirect_lock, flags);
394         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396         spin_unlock_irqrestore(&tp->indirect_lock, flags);
397         return val;
398 }
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                /* Read back to flush the posted write. */
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
424
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426 {
427         tp->write32_mbox(tp, off, val);
428         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430                 tp->read32_mbox(tp, off);
431 }
432
433 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
434 {
435         void __iomem *mbox = tp->regs + off;
436         writel(val, mbox);
437         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
438                 writel(val, mbox);
439         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
440                 readl(mbox);
441 }
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
/* Shorthand register accessors.  All of these dispatch through the
 * per-chip method pointers stored in struct tg3 (write32, read32,
 * write32_mbox, ...), and implicitly use the local variable `tp`.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
463
/* Write a 32-bit word into NIC on-chip SRAM at offset @off, using
 * either the config-space memory window (SRAM_USE_CONFIG) or the MMIO
 * memory window, under indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* 5906: skip accesses to the stats-block range entirely
         * (tg3_read_mem has the matching check).
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a 32-bit word from NIC on-chip SRAM at offset @off into *val.
 * Mirrors tg3_write_mem: config-space window when SRAM_USE_CONFIG is
 * set, MMIO window otherwise, both under indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        /* 5906: reads from the stats-block range return 0 without
         * touching the hardware (see matching check in tg3_write_mem).
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Disable chip interrupts: mask the PCI interrupt line in
 * MISC_HOST_CTRL, then write 1 to the interrupt mailbox (which
 * tg3_write_indirect_mbox treats as the disable value).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
590 static inline void tg3_cond_int(struct tg3 *tp)
591 {
592         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593             (tp->hw_status->status & SD_STATUS_UPDATED))
594                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
595         else
596                 tw32(HOSTCC_MODE, tp->coalesce_mode |
597                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
598 }
599
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * it is visible before the unmask), unmask the PCI interrupt, ack the
 * last status tag through the interrupt mailbox, and let tg3_cond_int
 * fire an interrupt if work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        /* 1-shot MSI parts get the mailbox written a second time --
         * presumably a hardware quirk of that mode; verify vs errata.
         */
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Ack the last processed status tag; mmiowb() orders this
         * write before any later MMIO from other CPUs.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the RX/TX paths: refresh trans_start so the watchdog does
 * not fire while we are deliberately stopped, disable NAPI polling,
 * then stop the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
662
/* Restart the RX/TX paths after tg3_netif_stop: wake the TX queue,
 * re-enable NAPI, then force a status update + interrupt enable so any
 * events that arrived while stopped get processed.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
674
/* Switch the chip's core clock configuration, preserving only the
 * CLKRUN bits and the low 5 bits of CLOCK_CTRL.  Skipped entirely on
 * CPMU-equipped and 5780-class parts.  Each step uses tw32_wait_f with
 * a 40 usec settle time (see the _tw32_flush comment).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Step down via 44MHZ_CORE|ALTCLK, then ALTCLK alone --
                 * presumably a required intermediate state; verify
                 * against the chip documentation.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Maximum busy-poll iterations for an MII management transaction. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg over the MII management interface.
 *
 * If MAC autopolling is on, it is temporarily disabled so the MI_COM
 * frame register can be driven by hand, and restored before returning.
 * The busy bit is polled for up to PHY_BUSY_LOOPS iterations.
 *
 * Returns 0 with the data (masked by MI_COM_DATA_MASK) in *val on
 * success, -EBUSY if the interface never went idle (*val left 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Compose the MI frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read once more after a short settle. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
757
/* Write @val to PHY register @reg over the MII management interface.
 *
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently skipped (reported as success).  Otherwise mirrors
 * tg3_readphy: autopolling is paused, the MI frame is posted, and the
 * busy bit is polled for up to PHY_BUSY_LOOPS iterations.
 *
 * Returns 0 on success, -EBUSY on poll timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Compose the MI frame: PHY address, register, data, write cmd. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
806
/* Enable or disable automatic MDI crossover in the PHY.  Only applies
 * to 5705-plus copper parts (serdes PHYs are skipped).  The 5906 uses
 * its EPHY shadow-register mechanism; other chips go through the aux
 * control register's MISC shadow page.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* Open the EPHY shadow window, flip the MDIX bit, then
                 * restore the original test-register value.
                 */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Select the MISC shadow page, read-modify-write the
                 * auto-MDIX bit, and set WREN so the write sticks.
                 */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
844
/* Enable the PHY "ethernet wirespeed" feature via a read-modify-write
 * of the AUX_CTRL register, unless the chip is flagged as not
 * supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        /* Write 0x7007 to select the shadow, read the current value,
         * then set bits 15 and 4.
         * NOTE(review): bit meanings come from the Broadcom PHY
         * datasheet and are not visible here -- confirm against it.
         */
        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is working.
 *
 * On any failure *resetp is set to 1 so the caller's retry loop
 * (tg3_phy_reset_5703_4_5) performs another BMCR reset first.
 * Returns 0 if all four channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's DSP block, then load the
                 * six-word pattern through the RW port.
                 * NOTE(review): register 0x16 appears to drive the DSP
                 * macro engine (0x0002 load, 0x0202 write, 0x0082/0x0802
                 * readback) -- per PHY datasheet, not visible here.
                 */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read the pattern back as low/high word pairs and
                 * compare against what was written.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: poke the DSP (0x000b /
                                 * 0x4001 / 0x4005) before reporting
                                 * failure, but do NOT request a reset.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
972
/* Clear (write zeros into) the test pattern in all four PHY DSP
 * channels, committing each channel via the 0x16 macro register.
 *
 * Returns 0 on success, -EBUSY if the macro engine does not finish
 * for some channel.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}
992
/* PHY reset workaround for 5703/5704/5705 chips: reset the PHY and
 * verify the DSP with test patterns, retrying with another reset
 * (up to 10 attempts) whenever verification asks for one.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): if every loop iteration bails out via "continue" on
 * a PHY read failure, reg32/phy9_orig are used uninitialized after
 * the loop -- presumably unreachable in practice; confirm.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                /* Sets do_phy_reset on failure so the next attempt
                 * starts from a fresh BMCR reset.
                 */
                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear the test patterns again before restoring state. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        /* Restore the master-mode setting saved earlier. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt (undo the 0x3000 set). */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* Fully reset the tigon3 PHY and re-apply all chip/PHY specific
 * workarounds (ADC, BER, jitter bugs, jumbo-frame bits, auto-MDIX,
 * wirespeed).
 *
 * Returns 0 on success or a negative errno if the PHY cannot be
 * read or reset.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        /* 5906: bring the EPHY out of IDDQ (low-power) mode first. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* Read BMSR twice: its status bits are latched, so the second
         * read reflects the current state.
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* A PHY reset drops the link; report it down immediately. */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/5704/5705 need the DSP test-pattern reset procedure. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        /* 5784 A0 / 5761 A0: undo the 12.5MHz MAC clock selection if
         * the CPMU was left in that state.
         */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
                u32 val;

                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

out:
        /* Per-PHY-bug DSP fixups.  The magic DSP address/value pairs
         * come from Broadcom; they are applied as-is.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
1190
/* Configure the GPIO-controlled auxiliary (Vaux) power switching.
 *
 * On dual-port devices (5704/5714) the GPIOs are shared between the
 * two PCI functions, so the peer's WOL/ASF flags must be honored:
 * if either port needs standby power, the GPIOs are driven to keep
 * Vaux available; otherwise they are toggled to release it.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        /* GPIO power frobbing only applies to real NICs (not LOMs). */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        /* Either port needing WOL or ASF means aux power must stay on. */
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* If the peer already configured the shared
                         * GPIOs, don't disturb them.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        /* Three staged writes: assert outputs, raise
                         * OUTPUT0, then drop OUTPUT2 (if usable).
                         */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        /* Again, defer to a peer that has already set
                         * up the shared GPIOs.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Pulse GPIO1 to release auxiliary power. */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
1286
1287 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1288 {
1289         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1290                 return 1;
1291         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1292                 if (speed != SPEED_10)
1293                         return 1;
1294         } else if (speed == SPEED_10)
1295                 return 1;
1296
1297         return 0;
1298 }
1299
1300 static int tg3_setup_phy(struct tg3 *, int);
1301
1302 #define RESET_KIND_SHUTDOWN     0
1303 #define RESET_KIND_INIT         1
1304 #define RESET_KIND_SUSPEND      2
1305
1306 static void tg3_write_sig_post_reset(struct tg3 *, int);
1307 static int tg3_halt_cpu(struct tg3 *, u32);
1308 static int tg3_nvram_lock(struct tg3 *);
1309 static void tg3_nvram_unlock(struct tg3 *);
1310
/* Power down the PHY (or serdes block) ahead of a low-power
 * transition, applying the chip-specific quirks along the way.
 * Chips with known power-down bugs are left powered.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                /* 5704 serdes: force HW autoneg + soft reset and set
                 * bit 15 of SERDES_CFG instead of a PHY power-down.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906 EPHY: reset, then put it in IDDQ low-power mode. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else {
                /* Force LEDs off and write the AUX_CTRL power value. */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        /* 5784 A0 / 5761 A0: drop the MAC clock to 12.5MHz before
         * powering the PHY down.
         */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1359
/* Transition the device to the requested PCI power state.
 *
 * D0 is a fast path: write the PM control register and return.
 * For D1/D2/D3hot, this arms PME, forces the link down to 10/HALF
 * (saving the original settings for resume), configures Wake-on-LAN,
 * gates clocks per chip generation, powers down the PHY when nothing
 * needs it, frobs the aux-power GPIOs, then finally writes the new
 * power state.
 *
 * Returns 0 on success, -EINVAL for an unrecognized state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        /* Clear any pending PME and the old state bits. */
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case PCI_D0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is a NIC */
                if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

                return 0;

        case PCI_D1:
                power_control |= 1;
                break;

        case PCI_D2:
                power_control |= 2;
                break;

        case PCI_D3hot:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        /* Mask PCI interrupts while the device is in low power. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Save the current link configuration for restoration on
         * the way back to D0.
         */
        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        /* Copper devices: renegotiate down to 10/HALF to save power. */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                int i;
                u32 val;

                /* Wait (up to ~200ms) for the firmware mailbox to
                 * signal readiness before proceeding.
                 */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                /* Keep the MAC and PHY configured so magic packets
                 * can still be received while suspended.
                 */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = (tp->tg3_flags &
                                             TG3_FLAG_WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Clock gating, per chip generation. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
                   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the clock changes in two staged writes. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Keep the PHY powered if WOL, ASF firmware, or the APE
         * still needs it; otherwise power it down.
         */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                tg3_power_down_phy(tp);

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                        int err;

                        /* Halting the RX CPU requires the NVRAM lock;
                         * unlock only if we actually obtained it.
                         */
                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        return 0;
}
1584
1585 static void tg3_link_report(struct tg3 *tp)
1586 {
1587         if (!netif_carrier_ok(tp->dev)) {
1588                 if (netif_msg_link(tp))
1589                         printk(KERN_INFO PFX "%s: Link is down.\n",
1590                                tp->dev->name);
1591         } else if (netif_msg_link(tp)) {
1592                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1593                        tp->dev->name,
1594                        (tp->link_config.active_speed == SPEED_1000 ?
1595                         1000 :
1596                         (tp->link_config.active_speed == SPEED_100 ?
1597                          100 : 10)),
1598                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1599                         "full" : "half"));
1600
1601                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1602                        "%s for RX.\n",
1603                        tp->dev->name,
1604                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1605                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1606         }
1607 }
1608
1609 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1610 {
1611         u32 new_tg3_flags = 0;
1612         u32 old_rx_mode = tp->rx_mode;
1613         u32 old_tx_mode = tp->tx_mode;
1614
1615         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1616
1617                 /* Convert 1000BaseX flow control bits to 1000BaseT
1618                  * bits before resolving flow control.
1619                  */
1620                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1621                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1622                                        ADVERTISE_PAUSE_ASYM);
1623                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1624
1625                         if (local_adv & ADVERTISE_1000XPAUSE)
1626                                 local_adv |= ADVERTISE_PAUSE_CAP;
1627                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1628                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1629                         if (remote_adv & LPA_1000XPAUSE)
1630                                 remote_adv |= LPA_PAUSE_CAP;
1631                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1632                                 remote_adv |= LPA_PAUSE_ASYM;
1633                 }
1634
1635                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1636                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1637                                 if (remote_adv & LPA_PAUSE_CAP)
1638                                         new_tg3_flags |=
1639                                                 (TG3_FLAG_RX_PAUSE |
1640                                                 TG3_FLAG_TX_PAUSE);
1641                                 else if (remote_adv & LPA_PAUSE_ASYM)
1642                                         new_tg3_flags |=
1643                                                 (TG3_FLAG_RX_PAUSE);
1644                         } else {
1645                                 if (remote_adv & LPA_PAUSE_CAP)
1646                                         new_tg3_flags |=
1647                                                 (TG3_FLAG_RX_PAUSE |
1648                                                 TG3_FLAG_TX_PAUSE);
1649                         }
1650                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1651                         if ((remote_adv & LPA_PAUSE_CAP) &&
1652                         (remote_adv & LPA_PAUSE_ASYM))
1653                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1654                 }
1655
1656                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1657                 tp->tg3_flags |= new_tg3_flags;
1658         } else {
1659                 new_tg3_flags = tp->tg3_flags;
1660         }
1661
1662         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1663                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1664         else
1665                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1666
1667         if (old_rx_mode != tp->rx_mode) {
1668                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1669         }
1670
1671         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1672                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1673         else
1674                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1675
1676         if (old_tx_mode != tp->tx_mode) {
1677                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1678         }
1679 }
1680
1681 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1682 {
1683         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1684         case MII_TG3_AUX_STAT_10HALF:
1685                 *speed = SPEED_10;
1686                 *duplex = DUPLEX_HALF;
1687                 break;
1688
1689         case MII_TG3_AUX_STAT_10FULL:
1690                 *speed = SPEED_10;
1691                 *duplex = DUPLEX_FULL;
1692                 break;
1693
1694         case MII_TG3_AUX_STAT_100HALF:
1695                 *speed = SPEED_100;
1696                 *duplex = DUPLEX_HALF;
1697                 break;
1698
1699         case MII_TG3_AUX_STAT_100FULL:
1700                 *speed = SPEED_100;
1701                 *duplex = DUPLEX_FULL;
1702                 break;
1703
1704         case MII_TG3_AUX_STAT_1000HALF:
1705                 *speed = SPEED_1000;
1706                 *duplex = DUPLEX_HALF;
1707                 break;
1708
1709         case MII_TG3_AUX_STAT_1000FULL:
1710                 *speed = SPEED_1000;
1711                 *duplex = DUPLEX_FULL;
1712                 break;
1713
1714         default:
1715                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1716                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1717                                  SPEED_10;
1718                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1719                                   DUPLEX_HALF;
1720                         break;
1721                 }
1722                 *speed = SPEED_INVALID;
1723                 *duplex = DUPLEX_INVALID;
1724                 break;
1725         };
1726 }
1727
1728 static void tg3_phy_copper_begin(struct tg3 *tp)
1729 {
1730         u32 new_adv;
1731         int i;
1732
1733         if (tp->link_config.phy_is_low_power) {
1734                 /* Entering low power mode.  Disable gigabit and
1735                  * 100baseT advertisements.
1736                  */
1737                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1738
1739                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1740                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1741                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1742                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1743
1744                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1745         } else if (tp->link_config.speed == SPEED_INVALID) {
1746                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1747                         tp->link_config.advertising &=
1748                                 ~(ADVERTISED_1000baseT_Half |
1749                                   ADVERTISED_1000baseT_Full);
1750
1751                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1752                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1753                         new_adv |= ADVERTISE_10HALF;
1754                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1755                         new_adv |= ADVERTISE_10FULL;
1756                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1757                         new_adv |= ADVERTISE_100HALF;
1758                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1759                         new_adv |= ADVERTISE_100FULL;
1760                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1761
1762                 if (tp->link_config.advertising &
1763                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1764                         new_adv = 0;
1765                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1766                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1767                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1768                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1769                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1770                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1771                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1772                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1773                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1774                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1775                 } else {
1776                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1777                 }
1778         } else {
1779                 /* Asking for a specific link mode. */
1780                 if (tp->link_config.speed == SPEED_1000) {
1781                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1782                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1783
1784                         if (tp->link_config.duplex == DUPLEX_FULL)
1785                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1786                         else
1787                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1788                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1789                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1790                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1791                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1792                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1793                 } else {
1794                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1795
1796                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1797                         if (tp->link_config.speed == SPEED_100) {
1798                                 if (tp->link_config.duplex == DUPLEX_FULL)
1799                                         new_adv |= ADVERTISE_100FULL;
1800                                 else
1801                                         new_adv |= ADVERTISE_100HALF;
1802                         } else {
1803                                 if (tp->link_config.duplex == DUPLEX_FULL)
1804                                         new_adv |= ADVERTISE_10FULL;
1805                                 else
1806                                         new_adv |= ADVERTISE_10HALF;
1807                         }
1808                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1809                 }
1810         }
1811
1812         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1813             tp->link_config.speed != SPEED_INVALID) {
1814                 u32 bmcr, orig_bmcr;
1815
1816                 tp->link_config.active_speed = tp->link_config.speed;
1817                 tp->link_config.active_duplex = tp->link_config.duplex;
1818
1819                 bmcr = 0;
1820                 switch (tp->link_config.speed) {
1821                 default:
1822                 case SPEED_10:
1823                         break;
1824
1825                 case SPEED_100:
1826                         bmcr |= BMCR_SPEED100;
1827                         break;
1828
1829                 case SPEED_1000:
1830                         bmcr |= TG3_BMCR_SPEED1000;
1831                         break;
1832                 };
1833
1834                 if (tp->link_config.duplex == DUPLEX_FULL)
1835                         bmcr |= BMCR_FULLDPLX;
1836
1837                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1838                     (bmcr != orig_bmcr)) {
1839                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1840                         for (i = 0; i < 1500; i++) {
1841                                 u32 tmp;
1842
1843                                 udelay(10);
1844                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1845                                     tg3_readphy(tp, MII_BMSR, &tmp))
1846                                         continue;
1847                                 if (!(tmp & BMSR_LSTATUS)) {
1848                                         udelay(40);
1849                                         break;
1850                                 }
1851                         }
1852                         tg3_writephy(tp, MII_BMCR, bmcr);
1853                         udelay(40);
1854                 }
1855         } else {
1856                 tg3_writephy(tp, MII_BMCR,
1857                              BMCR_ANENABLE | BMCR_ANRESTART);
1858         }
1859 }
1860
1861 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1862 {
1863         int err;
1864
1865         /* Turn off tap power management. */
1866         /* Set Extended packet length bit */
1867         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1868
1869         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1870         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1871
1872         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1873         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1874
1875         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1876         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1877
1878         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1879         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1880
1881         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1882         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1883
1884         udelay(40);
1885
1886         return err;
1887 }
1888
1889 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1890 {
1891         u32 adv_reg, all_mask = 0;
1892
1893         if (mask & ADVERTISED_10baseT_Half)
1894                 all_mask |= ADVERTISE_10HALF;
1895         if (mask & ADVERTISED_10baseT_Full)
1896                 all_mask |= ADVERTISE_10FULL;
1897         if (mask & ADVERTISED_100baseT_Half)
1898                 all_mask |= ADVERTISE_100HALF;
1899         if (mask & ADVERTISED_100baseT_Full)
1900                 all_mask |= ADVERTISE_100FULL;
1901
1902         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1903                 return 0;
1904
1905         if ((adv_reg & all_mask) != all_mask)
1906                 return 0;
1907         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1908                 u32 tg3_ctrl;
1909
1910                 all_mask = 0;
1911                 if (mask & ADVERTISED_1000baseT_Half)
1912                         all_mask |= ADVERTISE_1000HALF;
1913                 if (mask & ADVERTISED_1000baseT_Full)
1914                         all_mask |= ADVERTISE_1000FULL;
1915
1916                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1917                         return 0;
1918
1919                 if ((tg3_ctrl & all_mask) != all_mask)
1920                         return 0;
1921         }
1922         return 1;
1923 }
1924
/* Bring up (or re-evaluate) the link on a copper PHY and program the
 * MAC to match: clears latched status, applies per-chip PHY
 * workarounds, polls for link, validates the negotiated or forced
 * speed/duplex, resolves flow control, and finally sets the MAC port
 * mode/duplex and carrier state.  @force_reset forces a PHY reset
 * before link evaluation.  Always returns 0 except when 5401 DSP init
 * fails, whose error is returned directly.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		/* 5401 needs its DSP reprogrammed whenever link is
		 * down (or on first init); wait up to ~10ms for link
		 * to return afterwards.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit: retry with a full PHY
			 * reset if link did not come back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* NOTE(review): sets bit 10 of aux control shadow 0x4007 if
	 * clear and re-begins link setup — presumably a capacitive
	 * coupling workaround bit; exact semantics are PHY-internal.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 double-reads) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for a nonzero aux status, then
		 * decode the negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads until a plausible value appears
		 * (0 and 0x7fff treated as invalid reads).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if the PHY is
			 * actually in the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* Link down (or exiting low power): reprogram the PHY and give
	 * it one more chance to report link.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode/duplex to match the link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: notify firmware
	 * via the mailbox after clearing latched status.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any carrier change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2201
/* Software state for the 1000BaseX (fiber) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().  The state names and
 * MR_* flag names follow the IEEE 802.3z clause 37 arbitration state
 * diagram and its management (MR_*) variables.
 */
struct tg3_fiber_aneginfo {
	int state;	/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* control/status bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040	/* MR_LP_ADV_*: decoded */
#define MR_LP_ADV_HALF_DUPLEX	0x00000080	/* link partner ability */
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters: cur_time is incremented on every state machine
	 * invocation; link_time is stamped from cur_time at certain
	 * state transitions (e.g. restart).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has repeated, used to detect a stable "ability match".
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the incoming config stream each
	 * tick: stable ability word seen, idle (no config) seen, and
	 * ACK bit seen.
	 */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config words; bit layout below. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080	/* next page */
#define ANEG_CFG_ACK		0x00000040	/* acknowledge */
#define ANEG_CFG_RF2		0x00000020	/* remote fault bits */
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001	/* pause bits */
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000	/* half duplex */
#define ANEG_CFG_FD		0x00002000	/* full duplex */
#define ANEG_CFG_INVAL		0x00001f06	/* bits that must be zero */

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Settle time used by the state machine's timing checks (in the
 * machine's own cur_time ticks).
 */
#define ANEG_STATE_SETTLE_TIME	10000
2265
/* One step of the software 1000BASE-X (802.3z clause-37 style)
 * autonegotiation state machine, used when the hardware SG_DIG
 * autoneg block is not in use.  Called repeatedly from
 * fiber_autoneg() (roughly once per microsecond); all state between
 * invocations lives in @ap.
 *
 * Returns ANEG_OK (nothing to report), ANEG_TIMER_ENAB (keep
 * ticking, a settle timer is running), ANEG_DONE (negotiation
 * finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick ever: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word.  "ability_match" latches
	 * once the same word has been seen on more than one
	 * consecutive tick; "ack_match" tracks the ACK bit in the
	 * most recent word.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Word changed: restart the stability count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words arriving: link partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* (Re)start negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send an all-zero config word while the restart
		 * settle timer runs.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait out the settle time before advertising. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		/* Autoneg disabled: nothing more to do. */
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable non-zero word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's advertisement. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner ACKed; make sure the word it is
			 * ACKing is the one we latched.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reserved bits set in the received word => failure. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertisement into MR_LP_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* Bit 0x0008 is presumably the partner's toggle bit --
		 * not named in this file; treated opaquely here.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner dropped to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented
				 * below, so only proceed when neither
				 * side requested it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and let the link settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2513
2514 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2515 {
2516         int res = 0;
2517         struct tg3_fiber_aneginfo aninfo;
2518         int status = ANEG_FAILED;
2519         unsigned int tick;
2520         u32 tmp;
2521
2522         tw32_f(MAC_TX_AUTO_NEG, 0);
2523
2524         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2525         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2526         udelay(40);
2527
2528         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2529         udelay(40);
2530
2531         memset(&aninfo, 0, sizeof(aninfo));
2532         aninfo.flags |= MR_AN_ENABLE;
2533         aninfo.state = ANEG_STATE_UNKNOWN;
2534         aninfo.cur_time = 0;
2535         tick = 0;
2536         while (++tick < 195000) {
2537                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2538                 if (status == ANEG_DONE || status == ANEG_FAILED)
2539                         break;
2540
2541                 udelay(1);
2542         }
2543
2544         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2545         tw32_f(MAC_MODE, tp->mac_mode);
2546         udelay(40);
2547
2548         *flags = aninfo.flags;
2549
2550         if (status == ANEG_DONE &&
2551             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2552                              MR_LP_ADV_FULL_DUPLEX)))
2553                 res = 1;
2554
2555         return res;
2556 }
2557
/* Initialize the external BCM8002 SerDes PHY via raw vendor register
 * writes (register numbers/values are Broadcom-specific magic; the
 * inline comments below came with the original sequence).  The write
 * order and the busy-wait delays are part of the procedure -- do not
 * reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2607
/* Fiber link setup using the hardware SG_DIG autoneg block.
 *
 * Drives the SG_DIG_CTRL register to either force the link (autoneg
 * disabled) or run hardware autonegotiation, with parallel detection
 * as a fallback when the partner sends no config words.  On non-A0/A1
 * 5704 chips a MAC_SERDES_CFG workaround sequence is applied around
 * autoneg restarts ("workaround"/"port_a" below).
 *
 * Returns 1 when the link is considered up, 0 otherwise.  Side
 * effects: updates tp->serdes_counter, TG3_FLG2_PARALLEL_DETECT and
 * the flow-control settings via tg3_setup_flow_control().
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg (bit 31) is on,
		 * turn it off, applying the serdes workaround first.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* 0xc010000 / 0x4010000 differ per MAC
				 * port; exact meaning is Broadcom magic.
				 */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* A parallel-detected link stays up while PCS is
		 * synced and no config words arrive; count it down.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the autoneg-restart bit (bit 30). */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 == autoneg complete; bits 19/20
		 * carry the partner's pause advertisement.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: wait out the timeout,
			 * then try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2744
2745 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2746 {
2747         int current_link_up = 0;
2748
2749         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2750                 goto out;
2751
2752         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2753                 u32 flags;
2754                 int i;
2755
2756                 if (fiber_autoneg(tp, &flags)) {
2757                         u32 local_adv, remote_adv;
2758
2759                         local_adv = ADVERTISE_PAUSE_CAP;
2760                         remote_adv = 0;
2761                         if (flags & MR_LP_ADV_SYM_PAUSE)
2762                                 remote_adv |= LPA_PAUSE_CAP;
2763                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2764                                 remote_adv |= LPA_PAUSE_ASYM;
2765
2766                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2767
2768                         current_link_up = 1;
2769                 }
2770                 for (i = 0; i < 30; i++) {
2771                         udelay(20);
2772                         tw32_f(MAC_STATUS,
2773                                (MAC_STATUS_SYNC_CHANGED |
2774                                 MAC_STATUS_CFG_CHANGED));
2775                         udelay(40);
2776                         if ((tr32(MAC_STATUS) &
2777                              (MAC_STATUS_SYNC_CHANGED |
2778                               MAC_STATUS_CFG_CHANGED)) == 0)
2779                                 break;
2780                 }
2781
2782                 mac_status = tr32(MAC_STATUS);
2783                 if (current_link_up == 0 &&
2784                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2785                     !(mac_status & MAC_STATUS_RCVD_CFG))
2786                         current_link_up = 1;
2787         } else {
2788                 /* Forcing 1000FD link up. */
2789                 current_link_up = 1;
2790
2791                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2792                 udelay(40);
2793
2794                 tw32_f(MAC_MODE, tp->mac_mode);
2795                 udelay(40);
2796         }
2797
2798 out:
2799         return current_link_up;
2800 }
2801
/* Top-level link setup for TBI (fiber) ports.
 *
 * Fast path: if software (non-HW) autoneg is in use, the carrier is
 * already up and init is complete, and MAC_STATUS shows a clean
 * synced link with no pending config, just ack the change bits and
 * return.  Otherwise reprogram the MAC into TBI mode, run either the
 * hardware (SG_DIG) or by-hand link setup, drain the status-changed
 * bits, and update carrier state, LEDs and link_config accordingly.
 *
 * Always returns 0.  @force_reset is unused here (kept for signature
 * parity with the other tg3_setup_*_phy() variants).
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot the pre-setup state so we can report only real
	 * changes at the end.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			/* Link is stable: just clear the latched
			 * change bits and leave everything alone.
			 */
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC to TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-changed bit in the status block so the next
	 * real change is noticed.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the latched status-change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Nudge the partner by pulsing SEND_CONFIGS. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber is always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged; still report if pause, speed or
		 * duplex settings moved under us.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2913
/* Link setup for fiber ports whose SerDes is driven through an
 * MII-register interface (e.g. 5714S): program the 1000BASE-X
 * advertisement via MII_ADVERTISE/MII_BMCR, or force speed/duplex
 * when autoneg is off, then derive link state from BMSR (with a
 * MAC_TX_STATUS override on 5714) and resolve duplex from the
 * local/partner advertisement overlap.
 *
 * Returns the OR of the tg3_readphy() error codes accumulated along
 * the way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched status-change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the
	 * current value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's view of the link over BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from
		 * link_config, keeping unrelated bits intact.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and come back later.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the target BMCR (1000 Mb/s only;
		 * duplex from link_config).
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Strip the advertisement and restart
				 * autoneg so the partner drops the
				 * link before we force settings.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: read twice (see above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex from the overlap of both
			 * sides' advertisements; no overlap means the
			 * negotiation didn't really succeed.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this reads the OLD active_duplex -- it is only
	 * assigned current_duplex further below.  Looks intentional
	 * (half-duplex MAC mode follows the previously established
	 * state) but worth confirming against later driver versions.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3080
/* Periodic helper for serdes links with autoneg enabled: if autoneg
 * has had time to complete but the link is still down, probe the PHY
 * for a non-autonegotiating partner and force the link up by
 * "parallel detection".  Once config code words from the partner are
 * seen again, hand control back to autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }
        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, 0x1c, 0x7c00);
                        tg3_readphy(tp, 0x1c, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, 0x17, 0x0f01);
                        /* NOTE(review): the register is read twice on
                         * purpose, presumably to discard a latched stale
                         * value so the second read reflects current state
                         * -- confirm against the PHY datasheet.
                         */
                        tg3_readphy(tp, 0x15, &phy2);
                        tg3_readphy(tp, 0x15, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                /* Force 1000 Mb/s full duplex with autoneg
                                 * off, and remember we did so.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
                        }
                }
        }
        else if (netif_carrier_ok(tp->dev) &&
                 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, 0x17, 0x0f01);
                tg3_readphy(tp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                }
        }
}
3138
3139 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3140 {
3141         int err;
3142
3143         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3144                 err = tg3_setup_fiber_phy(tp, force_reset);
3145         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3146                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3147         } else {
3148                 err = tg3_setup_copper_phy(tp, force_reset);
3149         }
3150
3151         if (tp->link_config.active_speed == SPEED_1000 &&
3152             tp->link_config.active_duplex == DUPLEX_HALF)
3153                 tw32(MAC_TX_LENGTHS,
3154                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3155                       (6 << TX_LENGTHS_IPG_SHIFT) |
3156                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3157         else
3158                 tw32(MAC_TX_LENGTHS,
3159                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3160                       (6 << TX_LENGTHS_IPG_SHIFT) |
3161                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3162
3163         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3164                 if (netif_carrier_ok(tp->dev)) {
3165                         tw32(HOSTCC_STAT_COAL_TICKS,
3166                              tp->coal.stats_block_coalesce_usecs);
3167                 } else {
3168                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3169                 }
3170         }
3171
3172         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3173                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3174                 if (!netif_carrier_ok(tp->dev))
3175                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3176                               tp->pwrmgmt_thresh;
3177                 else
3178                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3179                 tw32(PCIE_PWR_MGMT_THRESH, val);
3180         }
3181
3182         return err;
3183 }
3184
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* If either write-reorder workaround is already in effect, a
         * reorder-induced bogus completion should be impossible --
         * flag it loudly.
         */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* tg3_poll() sees this flag and schedules the reset task. */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
3205
3206 static inline u32 tg3_tx_avail(struct tg3 *tp)
3207 {
3208         smp_mb();
3209         return (tp->tx_pending -
3210                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3211 }
3212
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
/* Reclaim TX descriptors between the software consumer index and the
 * hardware-reported consumer index: unmap the DMA buffers, free the
 * skbs, and wake the queue if enough space became available.
 */
static void tg3_tx(struct tg3 *tp)
{
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A completed slot with no skb means the hardware (or a
                 * reordered mailbox write) gave us a bogus index.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                /* First descriptor covers the linear part of the skb. */
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Each page fragment occupies one more descriptor. */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        /* Fragment slots must be empty and must not run
                         * past the hardware index; otherwise the ring
                         * state is corrupt.
                         */
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(ri, mapping),
                                       skb_shinfo(skb)->frags[i].size,
                                       PCI_DMA_TODEVICE);

                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under netif_tx_lock to close the race with the
         * transmit path stopping the queue concurrently.
         */
        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                netif_tx_lock(tp->dev);
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
3280
3281 /* Returns size of skb allocated or < 0 on error.
3282  *
3283  * We only need to fill in the address because the other members
3284  * of the RX descriptor are invariant, see tg3_init_rings.
3285  *
3286  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3287  * posting buffers we only dirty the first cache line of the RX
3288  * descriptor (containing the address).  Whereas for the RX status
3289  * buffers the cpu only reads the last cacheline of the RX descriptor
3290  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3291  */
3292 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3293                             int src_idx, u32 dest_idx_unmasked)
3294 {
3295         struct tg3_rx_buffer_desc *desc;
3296         struct ring_info *map, *src_map;
3297         struct sk_buff *skb;
3298         dma_addr_t mapping;
3299         int skb_size, dest_idx;
3300
3301         src_map = NULL;
3302         switch (opaque_key) {
3303         case RXD_OPAQUE_RING_STD:
3304                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3305                 desc = &tp->rx_std[dest_idx];
3306                 map = &tp->rx_std_buffers[dest_idx];
3307                 if (src_idx >= 0)
3308                         src_map = &tp->rx_std_buffers[src_idx];
3309                 skb_size = tp->rx_pkt_buf_sz;
3310                 break;
3311
3312         case RXD_OPAQUE_RING_JUMBO:
3313                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3314                 desc = &tp->rx_jumbo[dest_idx];
3315                 map = &tp->rx_jumbo_buffers[dest_idx];
3316                 if (src_idx >= 0)
3317                         src_map = &tp->rx_jumbo_buffers[src_idx];
3318                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3319                 break;
3320
3321         default:
3322                 return -EINVAL;
3323         };
3324
3325         /* Do not overwrite any of the map or rp information
3326          * until we are sure we can commit to a new buffer.
3327          *
3328          * Callers depend upon this behavior and assume that
3329          * we leave everything unchanged if we fail.
3330          */
3331         skb = netdev_alloc_skb(tp->dev, skb_size);
3332         if (skb == NULL)
3333                 return -ENOMEM;
3334
3335         skb_reserve(skb, tp->rx_offset);
3336
3337         mapping = pci_map_single(tp->pdev, skb->data,
3338                                  skb_size - tp->rx_offset,
3339                                  PCI_DMA_FROMDEVICE);
3340
3341         map->skb = skb;
3342         pci_unmap_addr_set(map, mapping, mapping);
3343
3344         if (src_map != NULL)
3345                 src_map->skb = NULL;
3346
3347         desc->addr_hi = ((u64)mapping >> 32);
3348         desc->addr_lo = ((u64)mapping & 0xffffffff);
3349
3350         return skb_size;
3351 }
3352
3353 /* We only need to move over in the address because the other
3354  * members of the RX descriptor are invariant.  See notes above
3355  * tg3_alloc_rx_skb for full details.
3356  */
3357 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3358                            int src_idx, u32 dest_idx_unmasked)
3359 {
3360         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3361         struct ring_info *src_map, *dest_map;
3362         int dest_idx;
3363
3364         switch (opaque_key) {
3365         case RXD_OPAQUE_RING_STD:
3366                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3367                 dest_desc = &tp->rx_std[dest_idx];
3368                 dest_map = &tp->rx_std_buffers[dest_idx];
3369                 src_desc = &tp->rx_std[src_idx];
3370                 src_map = &tp->rx_std_buffers[src_idx];
3371                 break;
3372
3373         case RXD_OPAQUE_RING_JUMBO:
3374                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3375                 dest_desc = &tp->rx_jumbo[dest_idx];
3376                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3377                 src_desc = &tp->rx_jumbo[src_idx];
3378                 src_map = &tp->rx_jumbo_buffers[src_idx];
3379                 break;
3380
3381         default:
3382                 return;
3383         };
3384
3385         dest_map->skb = src_map->skb;
3386         pci_unmap_addr_set(dest_map, mapping,
3387                            pci_unmap_addr(src_map, mapping));
3388         dest_desc->addr_hi = src_desc->addr_hi;
3389         dest_desc->addr_lo = src_desc->addr_lo;
3390
3391         src_map->skb = NULL;
3392 }
3393
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged skb to the stack via the hardware VLAN
 * acceleration path, using the VLAN group registered on this device.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3400
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* Process up to 'budget' entries from the RX status ring; returns the
 * number of packets delivered to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask, rx_std_posted = 0;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie identifies which producer ring (and
                 * which slot in it) this buffer was posted from.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        goto next_pkt_nopost;
                }

                work_mask |= opaque_key;

                /* Drop errored frames, except the harmless odd-nibble
                 * MII error, and recycle their buffers in place.
                 */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

                if (len > RX_COPY_THRESHOLD
                        && tp->rx_offset == 2
                        /* rx_offset != 2 iff this is a 5701 card running
                         * in PCI-X mode [see tg3_get_invariants()] */
                ) {
                        int skb_size;

                        /* Large frame: hand the existing buffer to the
                         * stack and post a freshly allocated replacement.
                         */
                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        /* Small frame: copy it into a new skb and
                         * recycle the original ring buffer.
                         */
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev, len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        /* Reserve 2 bytes so the IP header is aligned. */
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Trust the hardware checksum only when the chip flags
                 * the frame as TCP/UDP and the computed sum is 0xffff.
                 */
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                tp->dev->last_rx = jiffies;
                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Periodically tell the chip about newly posted standard
                 * buffers so it does not run dry on long bursts.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;

                        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
                                     TG3_64BIT_REG_LOW, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        /* Ensure the mailbox writes are visible to the device before we
         * return to interrupt context.
         */
        mmiowb();

        return received;
}
3580
/* One pass of NAPI poll work: handle link-change events, reap TX
 * completions, then receive up to (budget - work_done) packets.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
        struct tg3_hw_status *sblk = tp->hw_status;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Ack the link-change bit while preserving the
                         * remaining status bits.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                /* tg3_tx() may have detected bogus completions and set
                 * the recovery flag; stop polling until the reset runs.
                 */
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done += tg3_rx(tp, budget - work_done);

        return work_done;
}
3614
/* NAPI poll method: repeat tg3_poll_work() until the budget is spent
 * or no work remains, then complete NAPI and re-enable interrupts.
 * Bails out to schedule the reset task if TX recovery is pending.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        int work_done = 0;
        struct tg3_hw_status *sblk = tp->hw_status;

        while (1) {
                work_done = tg3_poll_work(tp, work_done, budget);

                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                        /* tp->last_tag is used in tg3_restart_ints() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tp->last_tag = sblk->status_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tp))) {
                        netif_rx_complete(tp->dev, napi);
                        tg3_restart_ints(tp);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        netif_rx_complete(tp->dev, napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
3655
/* Park interrupt processing: after irq_sync is set, the interrupt
 * handlers bail out early (they test it via tg3_irq_sync()), and
 * synchronize_irq() waits for any handler already running to return.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make the irq_sync store visible before waiting on the IRQ. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
3665
/* Non-zero while tg3_irq_quiesce() has interrupt processing parked;
 * the interrupt handlers check this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
3670
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                /* Also quiesce the IRQ handler (see tg3_irq_quiesce). */
                tg3_irq_quiesce(tp);
}
3682
/* Release the lock taken by tg3_full_lock().  Note this does not clear
 * irq_sync; that is done explicitly where the device is restarted
 * (see tg3_restart_hw's error path).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
3687
3688 /* One-shot MSI handler - Chip automatically disables interrupt
3689  * after sending MSI so driver doesn't have to do it.
3690  */
3691 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3692 {
3693         struct net_device *dev = dev_id;
3694         struct tg3 *tp = netdev_priv(dev);
3695
3696         prefetch(tp->hw_status);
3697         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3698
3699         if (likely(!tg3_irq_sync(tp)))
3700                 netif_rx_schedule(dev, &tp->napi);
3701
3702         return IRQ_HANDLED;
3703 }
3704
3705 /* MSI ISR - No need to check for interrupt sharing and no need to
3706  * flush status block and interrupt mailbox. PCI ordering rules
3707  * guarantee that MSI will arrive after the status block.
3708  */
3709 static irqreturn_t tg3_msi(int irq, void *dev_id)
3710 {
3711         struct net_device *dev = dev_id;
3712         struct tg3 *tp = netdev_priv(dev);
3713
3714         prefetch(tp->hw_status);
3715         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3716         /*
3717          * Writing any value to intr-mbox-0 clears PCI INTA# and
3718          * chip-internal interrupt pending events.
3719          * Writing non-zero to intr-mbox-0 additional tells the
3720          * NIC to stop sending us irqs, engaging "in-intr-handler"
3721          * event coalescing.
3722          */
3723         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3724         if (likely(!tg3_irq_sync(tp)))
3725                 netif_rx_schedule(dev, &tp->napi);
3726
3727         return IRQ_RETVAL(1);
3728 }
3729
/* Legacy INTx interrupt handler for chips that do not use tagged
 * status blocks.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt (or chip is resetting). */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(dev, &tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
3778
/* INTx interrupt handler for chips using tagged status blocks: the
 * status_tag, compared against tp->last_tag, tells whether new work
 * was posted since the last acknowledgement.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt (or chip is resetting). */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
3826
3827 /* ISR for interrupt test */
3828 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3829 {
3830         struct net_device *dev = dev_id;
3831         struct tg3 *tp = netdev_priv(dev);
3832         struct tg3_hw_status *sblk = tp->hw_status;
3833
3834         if ((sblk->status & SD_STATUS_UPDATED) ||
3835             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3836                 tg3_disable_ints(tp);
3837                 return IRQ_RETVAL(1);
3838         }
3839         return IRQ_RETVAL(0);
3840 }
3841
3842 static int tg3_init_hw(struct tg3 *, int);
3843 static int tg3_halt(struct tg3 *, int, int);
3844
3845 /* Restart hardware after configuration changes, self-test, etc.
3846  * Invoked with tp->lock held.
3847  */
3848 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3849 {
3850         int err;
3851
3852         err = tg3_init_hw(tp, reset_phy);
3853         if (err) {
                /* Re-init failed: halt the chip and close the device.
                 * The full lock is dropped around the teardown calls —
                 * NOTE(review): presumably because dev_close()/
                 * del_timer_sync() may sleep or take other locks — and
                 * re-acquired afterwards so the caller's "invoked with
                 * tp->lock held" contract still holds on return.
                 */
3854                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3855                        "aborting.\n", tp->dev->name);
3856                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3857                 tg3_full_unlock(tp);
3858                 del_timer_sync(&tp->timer);
3859                 tp->irq_sync = 0;
3860                 napi_enable(&tp->napi);
3861                 dev_close(tp->dev);
3862                 tg3_full_lock(tp, 0);
3863         }
3864         return err;
3865 }
3866
3867 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point (CONFIG_NET_POLL_CONTROLLER): service the device
 * with interrupts disabled by invoking the INTx handler directly with
 * this device's IRQ number.
 */
3868 static void tg3_poll_controller(struct net_device *dev)
3869 {
3870         struct tg3 *tp = netdev_priv(dev);
3871
3872         tg3_interrupt(tp->pdev->irq, dev);
3873 }
3874 #endif
3875
/* Deferred reset handler (tp->reset_task), scheduled e.g. from
 * tg3_tx_timeout().  Runs in process context: stops netif/NAPI activity,
 * fully resets and re-initializes the hardware, then restarts traffic.
 */
3876 static void tg3_reset_task(struct work_struct *work)
3877 {
3878         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3879         unsigned int restart_timer;
3880
3881         tg3_full_lock(tp, 0);
3882
        /* Nothing to do if the interface was brought down meanwhile. */
3883         if (!netif_running(tp->dev)) {
3884                 tg3_full_unlock(tp);
3885                 return;
3886         }
3887
        /* tg3_netif_stop() is deliberately called outside tp's locks;
         * the lock is then re-taken with irq_sync (second arg = 1).
         */
3888         tg3_full_unlock(tp);
3889
3890         tg3_netif_stop(tp);
3891
3892         tg3_full_lock(tp, 1);
3893
3894         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3895         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3896
3897         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* TX recovery: switch to flushing mailbox write methods
                 * and mark mailbox writes as needing reorder protection.
                 */
3898                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3899                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3900                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3901                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3902         }
3903
3904         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3905         if (tg3_init_hw(tp, 1))
3906                 goto out;
3907
3908         tg3_netif_start(tp);
3909
3910         if (restart_timer)
3911                 mod_timer(&tp->timer, jiffies + 1);
3912
3913 out:
3914         tg3_full_unlock(tp);
3915 }
3916
/* Dump a few MAC and DMA status registers to the log; used to aid
 * debugging of transmit timeouts (see tg3_tx_timeout).
 */
3917 static void tg3_dump_short_state(struct tg3 *tp)
3918 {
3919         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3920                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3921         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3922                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3923 }
3924
/* net_device watchdog hook: log the timeout (plus register state when
 * tx_err messages are enabled) and defer the actual chip reset to
 * process context via tp->reset_task.
 */
3925 static void tg3_tx_timeout(struct net_device *dev)
3926 {
3927         struct tg3 *tp = netdev_priv(dev);
3928
3929         if (netif_msg_tx_err(tp)) {
3930                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3931                        dev->name);
3932                 tg3_dump_short_state(tp);
3933         }
3934
3935         schedule_work(&tp->reset_task);
3936 }
3937
3938 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3939 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3940 {
3941         u32 base = (u32) mapping & 0xffffffff;
3942
3943         return ((base > 0xffffdcc0) &&
3944                 (base + len + 8 < base));
3945 }
3946
3947 /* Test for DMA addresses > 40-bit */
3948 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3949                                           int len)
3950 {
        /* The check is only meaningful when dma_addr_t can actually
         * exceed 40 bits (64-bit kernel with highmem) and the chip has
         * the 40-bit DMA erratum flag set; in every other configuration
         * the address cannot overflow and 0 is returned.
         */
3951 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3952         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3953                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3954         return 0;
3955 #else
3956         return 0;
3957 #endif
3958 }
3959
3960 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3961
3962 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Re-queue @skb as a single linear copy to dodge the 4GB / 40-bit DMA
 * errata detected by the caller, then unmap and clear the sw ring
 * entries from *start up to @last_plus_one that were set up for the
 * original skb.  On success *start is advanced past the new descriptor
 * and 0 is returned; on failure (-1) the copy could not be allocated or
 * itself crossed a 4GB boundary.  The original skb is freed either way,
 * so a failing return means the packet is silently dropped.
 */
3963 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3964                                        u32 last_plus_one, u32 *start,
3965                                        u32 base_flags, u32 mss)
3966 {
3967         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3968         dma_addr_t new_addr = 0;
3969         u32 entry = *start;
3970         int i, ret = 0;
3971
3972         if (!new_skb) {
3973                 ret = -1;
3974         } else {
3975                 /* New SKB is guaranteed to be linear. */
3976                 entry = *start;
3977                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3978                                           PCI_DMA_TODEVICE);
3979                 /* Make sure new skb does not cross any 4G boundaries.
3980                  * Drop the packet if it does.
3981                  */
3982                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3983                         ret = -1;
3984                         dev_kfree_skb(new_skb);
3985                         new_skb = NULL;
3986                 } else {
3987                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3988                                     base_flags, 1 | (mss << 1));
3989                         *start = NEXT_TX(entry);
3990                 }
3991         }
3992
3993         /* Now clean up the sw ring entries. */
3994         i = 0;
3995         while (entry != last_plus_one) {
3996                 int len;
3997
                /* Entry 0 held the linear head; entries 1..n held frags. */
3998                 if (i == 0)
3999                         len = skb_headlen(skb);
4000                 else
4001                         len = skb_shinfo(skb)->frags[i-1].size;
4002                 pci_unmap_single(tp->pdev,
4003                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4004                                  len, PCI_DMA_TODEVICE);
4005                 if (i == 0) {
                        /* First slot now owns the copy (NULL on failure). */
4006                         tp->tx_buffers[entry].skb = new_skb;
4007                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4008                 } else {
4009                         tp->tx_buffers[entry].skb = NULL;
4010                 }
4011                 entry = NEXT_TX(entry);
4012                 i++;
4013         }
4014
4015         dev_kfree_skb(skb);
4016
4017         return ret;
4018 }
4019
4020 static void tg3_set_txd(struct tg3 *tp, int entry,
4021                         dma_addr_t mapping, int len, u32 flags,
4022                         u32 mss_and_is_end)
4023 {
4024         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4025         int is_end = (mss_and_is_end & 0x1);
4026         u32 mss = (mss_and_is_end >> 1);
4027         u32 vlan_tag = 0;
4028
4029         if (is_end)
4030                 flags |= TXD_FLAG_END;
4031         if (flags & TXD_FLAG_VLAN) {
4032                 vlan_tag = flags >> 16;
4033                 flags &= 0xffff;
4034         }
4035         vlan_tag |= (mss << TXD_MSS_SHIFT);
4036
4037         txd->addr_hi = ((u64) mapping >> 32);
4038         txd->addr_lo = ((u64) mapping & 0xffffffff);
4039         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4040         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4041 }
4042
4043 /* hard_start_xmit for devices that don't have any bugs and
4044  * support TG3_FLG2_HW_TSO_2 only.
4045  */
4046 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4047 {
4048         struct tg3 *tp = netdev_priv(dev);
4049         dma_addr_t mapping;
4050         u32 len, entry, base_flags, mss;
4051
4052         len = skb_headlen(skb);
4053
4054         /* We are running in BH disabled context with netif_tx_lock
4055          * and TX reclaim runs via tp->napi.poll inside of a software
4056          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4057          * no IRQ context deadlocks to worry about either.  Rejoice!
4058          */
4059         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4060                 if (!netif_queue_stopped(dev)) {
4061                         netif_stop_queue(dev);
4062
4063                         /* This is a hard error, log it. */
4064                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4065                                "queue awake!\n", dev->name);
4066                 }
4067                 return NETDEV_TX_BUSY;
4068         }
4069
4070         entry = tp->tx_prod;
4071         base_flags = 0;
4072         mss = 0;
        /* Non-zero gso_size means this is a TSO frame: unshare the header
         * if needed and fold the header length into the mss word (the
         * hardware-specific encoding uses the bits above bit 9).
         */
4073         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4074                 int tcp_opt_len, ip_tcp_len;
4075
4076                 if (skb_header_cloned(skb) &&
4077                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4078                         dev_kfree_skb(skb);
4079                         goto out_unlock;
4080                 }
4081
4082                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4083                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4084                 else {
4085                         struct iphdr *iph = ip_hdr(skb);
4086
4087                         tcp_opt_len = tcp_optlen(skb);
4088                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4089
                        /* Hardware recomputes these; zero/seed them here. */
4090                         iph->check = 0;
4091                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4092                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4093                 }
4094
4095                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4096                                TXD_FLAG_CPU_POST_DMA);
4097
4098                 tcp_hdr(skb)->check = 0;
4099
4100         }
4101         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4102                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4103 #if TG3_VLAN_TAG_USED
4104         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4105                 base_flags |= (TXD_FLAG_VLAN |
4106                                (vlan_tx_tag_get(skb) << 16));
4107 #endif
4108
4109         /* Queue skb data, a.k.a. the main skb fragment. */
4110         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4111
4112         tp->tx_buffers[entry].skb = skb;
4113         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4114
4115         tg3_set_txd(tp, entry, mapping, len, base_flags,
4116                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4117
4118         entry = NEXT_TX(entry);
4119
4120         /* Now loop through additional data fragments, and queue them. */
4121         if (skb_shinfo(skb)->nr_frags > 0) {
4122                 unsigned int i, last;
4123
4124                 last = skb_shinfo(skb)->nr_frags - 1;
4125                 for (i = 0; i <= last; i++) {
4126                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4127
4128                         len = frag->size;
4129                         mapping = pci_map_page(tp->pdev,
4130                                                frag->page,
4131                                                frag->page_offset,
4132                                                len, PCI_DMA_TODEVICE);
4133
                        /* Only the first slot keeps the skb pointer, so
                         * reclaim frees the skb exactly once.
                         */
4134                         tp->tx_buffers[entry].skb = NULL;
4135                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4136
4137                         tg3_set_txd(tp, entry, mapping, len,
4138                                     base_flags, (i == last) | (mss << 1));
4139
4140                         entry = NEXT_TX(entry);
4141                 }
4142         }
4143
4144         /* Packets are ready, update Tx producer idx local and on card. */
4145         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4146
4147         tp->tx_prod = entry;
        /* Stop the queue when a worst-case frame no longer fits; re-wake
         * immediately if reclaim has already freed enough descriptors.
         */
4148         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4149                 netif_stop_queue(dev);
4150                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4151                         netif_wake_queue(tp->dev);
4152         }
4153
4154 out_unlock:
4155         mmiowb();
4156
4157         dev->trans_start = jiffies;
4158
4159         return NETDEV_TX_OK;
4160 }
4161
4162 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4163
4164 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4165  * TSO header is greater than 80 bytes.
4166  */
/* Software fallback for the >80-byte TSO header erratum: segment @skb
 * with GSO and transmit each resulting segment individually through
 * tg3_start_xmit_dma_bug().  Always consumes @skb.
 */
4167 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4168 {
4169         struct sk_buff *segs, *nskb;
4170
4171         /* Estimate the number of fragments in the worst case */
4172         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4173                 netif_stop_queue(tp->dev);
                /* Re-check after stopping; if still short, push back. */
4174                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4175                         return NETDEV_TX_BUSY;
4176
4177                 netif_wake_queue(tp->dev);
4178         }
4179
4180         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4181         if (unlikely(IS_ERR(segs)))
4182                 goto tg3_tso_bug_end;
4183
4184         do {
4185                 nskb = segs;
4186                 segs = segs->next;
4187                 nskb->next = NULL;
4188                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4189         } while (segs);
4190
4191 tg3_tso_bug_end:
4192         dev_kfree_skb(skb);
4193
4194         return NETDEV_TX_OK;
4195 }
4196
4197 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4198  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4199  */
4200 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4201 {
4202         struct tg3 *tp = netdev_priv(dev);
4203         dma_addr_t mapping;
4204         u32 len, entry, base_flags, mss;
4205         int would_hit_hwbug;
4206
4207         len = skb_headlen(skb);
4208
4209         /* We are running in BH disabled context with netif_tx_lock
4210          * and TX reclaim runs via tp->napi.poll inside of a software
4211          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4212          * no IRQ context deadlocks to worry about either.  Rejoice!
4213          */
4214         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4215                 if (!netif_queue_stopped(dev)) {
4216                         netif_stop_queue(dev);
4217
4218                         /* This is a hard error, log it. */
4219                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4220                                "queue awake!\n", dev->name);
4221                 }
4222                 return NETDEV_TX_BUSY;
4223         }
4224
4225         entry = tp->tx_prod;
4226         base_flags = 0;
4227         if (skb->ip_summed == CHECKSUM_PARTIAL)
4228                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4229         mss = 0;
4230         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4231                 struct iphdr *iph;
4232                 int tcp_opt_len, ip_tcp_len, hdr_len;
4233
4234                 if (skb_header_cloned(skb) &&
4235                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4236                         dev_kfree_skb(skb);
4237                         goto out_unlock;
4238                 }
4239
4240                 tcp_opt_len = tcp_optlen(skb);
4241                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4242
                /* Headers longer than 80 bytes trip the TSO erratum on
                 * affected chips; fall back to software GSO segmentation.
                 */
4243                 hdr_len = ip_tcp_len + tcp_opt_len;
4244                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4245                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4246                         return (tg3_tso_bug(tp, skb));
4247
4248                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4249                                TXD_FLAG_CPU_POST_DMA);
4250
4251                 iph = ip_hdr(skb);
4252                 iph->check = 0;
4253                 iph->tot_len = htons(mss + hdr_len);
                /* HW TSO computes the TCP checksum itself; firmware TSO
                 * needs the pseudo-header seed pre-computed by the host.
                 */
4254                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4255                         tcp_hdr(skb)->check = 0;
4256                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4257                 } else
4258                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4259                                                                  iph->daddr, 0,
4260                                                                  IPPROTO_TCP,
4261                                                                  0);
4262
                /* IP/TCP option lengths are encoded differently depending
                 * on chip generation: in the mss word (HW TSO and 5705)
                 * or in base_flags (everything else).
                 */
4263                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4264                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4265                         if (tcp_opt_len || iph->ihl > 5) {
4266                                 int tsflags;
4267
4268                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4269                                 mss |= (tsflags << 11);
4270                         }
4271                 } else {
4272                         if (tcp_opt_len || iph->ihl > 5) {
4273                                 int tsflags;
4274
4275                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4276                                 base_flags |= tsflags << 12;
4277                         }
4278                 }
4279         }
4280 #if TG3_VLAN_TAG_USED
4281         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4282                 base_flags |= (TXD_FLAG_VLAN |
4283                                (vlan_tx_tag_get(skb) << 16));
4284 #endif
4285
4286         /* Queue skb data, a.k.a. the main skb fragment. */
4287         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4288
4289         tp->tx_buffers[entry].skb = skb;
4290         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4291
        /* Track whether any mapping trips the 4GB-crossing or 40-bit
         * DMA errata; if so, everything is redone via the workaround.
         */
4292         would_hit_hwbug = 0;
4293
4294         if (tg3_4g_overflow_test(mapping, len))
4295                 would_hit_hwbug = 1;
4296
4297         tg3_set_txd(tp, entry, mapping, len, base_flags,
4298                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4299
4300         entry = NEXT_TX(entry);
4301
4302         /* Now loop through additional data fragments, and queue them. */
4303         if (skb_shinfo(skb)->nr_frags > 0) {
4304                 unsigned int i, last;
4305
4306                 last = skb_shinfo(skb)->nr_frags - 1;
4307                 for (i = 0; i <= last; i++) {
4308                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4309
4310                         len = frag->size;
4311                         mapping = pci_map_page(tp->pdev,
4312                                                frag->page,
4313                                                frag->page_offset,
4314                                                len, PCI_DMA_TODEVICE);
4315
4316                         tp->tx_buffers[entry].skb = NULL;
4317                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4318
4319                         if (tg3_4g_overflow_test(mapping, len))
4320                                 would_hit_hwbug = 1;
4321
4322                         if (tg3_40bit_overflow_test(tp, mapping, len))
4323                                 would_hit_hwbug = 1;
4324
4325                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4326                                 tg3_set_txd(tp, entry, mapping, len,
4327                                             base_flags, (i == last)|(mss << 1));
4328                         else
4329                                 tg3_set_txd(tp, entry, mapping, len,
4330                                             base_flags, (i == last));
4331
4332                         entry = NEXT_TX(entry);
4333                 }
4334         }
4335
4336         if (would_hit_hwbug) {
4337                 u32 last_plus_one = entry;
4338                 u32 start;
4339
                /* Rewind to the first descriptor used by this packet. */
4340                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4341                 start &= (TG3_TX_RING_SIZE - 1);
4342
4343                 /* If the workaround fails due to memory/mapping
4344                  * failure, silently drop this packet.
4345                  */
4346                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4347                                                 &start, base_flags, mss))
4348                         goto out_unlock;
4349
4350                 entry = start;
4351         }
4352
4353         /* Packets are ready, update Tx producer idx local and on card. */
4354         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4355
4356         tp->tx_prod = entry;
4357         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4358                 netif_stop_queue(dev);
4359                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4360                         netif_wake_queue(tp->dev);
4361         }
4362
4363 out_unlock:
4364         mmiowb();
4365
4366         dev->trans_start = jiffies;
4367
4368         return NETDEV_TX_OK;
4369 }
4370
4371 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4372                                int new_mtu)
4373 {
4374         dev->mtu = new_mtu;
4375
4376         if (new_mtu > ETH_DATA_LEN) {
4377                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4378                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4379                         ethtool_op_set_tso(dev, 0);
4380                 }
4381                 else
4382                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4383         } else {
4384                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4385                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4386                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4387         }
4388 }
4389
/* ndo change_mtu handler: validate the new MTU, then (if the interface
 * is up) stop traffic, halt the chip, apply the MTU and restart the
 * hardware.  Returns 0 on success or a negative errno.
 */
4390 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4391 {
4392         struct tg3 *tp = netdev_priv(dev);
4393         int err;
4394
4395         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4396                 return -EINVAL;
4397
4398         if (!netif_running(dev)) {
4399                 /* We'll just catch it later when the
4400                  * device is up'd.
4401                  */
4402                 tg3_set_mtu(dev, tp, new_mtu);
4403                 return 0;
4404         }
4405
4406         tg3_netif_stop(tp);
4407
4408         tg3_full_lock(tp, 1);
4409
4410         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4411
4412         tg3_set_mtu(dev, tp, new_mtu);
4413
        /* tg3_restart_hw() closes the device itself on failure, so
         * traffic is only restarted when it succeeds.
         */
4414         err = tg3_restart_hw(tp, 0);
4415
4416         if (!err)
4417                 tg3_netif_start(tp);
4418
4419         tg3_full_unlock(tp);
4420
4421         return err;
4422 }
4423
4424 /* Free up pending packets in all rx/tx rings.
4425  *
4426  * The chip has been shut down and the driver detached from
4427  * the networking, so no interrupts or new tx packets will
4428  * end up in the driver.  tp->{tx,}lock is not held and we are not
4429  * in an interrupt context and thus may sleep.
4430  */
4431 static void tg3_free_rings(struct tg3 *tp)
4432 {
4433         struct ring_info *rxp;
4434         int i;
4435
        /* Standard RX ring: unmap and free every posted buffer. */
4436         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4437                 rxp = &tp->rx_std_buffers[i];
4438
4439                 if (rxp->skb == NULL)
4440                         continue;
4441                 pci_unmap_single(tp->pdev,
4442                                  pci_unmap_addr(rxp, mapping),
4443                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4444                                  PCI_DMA_FROMDEVICE);
4445                 dev_kfree_skb_any(rxp->skb);
4446                 rxp->skb = NULL;
4447         }
4448
        /* Jumbo RX ring: same, with the jumbo buffer size. */
4449         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4450                 rxp = &tp->rx_jumbo_buffers[i];
4451
4452                 if (rxp->skb == NULL)
4453                         continue;
4454                 pci_unmap_single(tp->pdev,
4455                                  pci_unmap_addr(rxp, mapping),
4456                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4457                                  PCI_DMA_FROMDEVICE);
4458                 dev_kfree_skb_any(rxp->skb);
4459                 rxp->skb = NULL;
4460         }
4461
        /* TX ring: each pending skb occupies one slot for its linear
         * head plus one slot per page fragment; only the head slot
         * carries the skb pointer.
         */
4462         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4463                 struct tx_ring_info *txp;
4464                 struct sk_buff *skb;
4465                 int j;
4466
4467                 txp = &tp->tx_buffers[i];
4468                 skb = txp->skb;
4469
4470                 if (skb == NULL) {
4471                         i++;
4472                         continue;
4473                 }
4474
4475                 pci_unmap_single(tp->pdev,
4476                                  pci_unmap_addr(txp, mapping),
4477                                  skb_headlen(skb),
4478                                  PCI_DMA_TODEVICE);
4479                 txp->skb = NULL;
4480
4481                 i++;
4482
4483                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4484                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4485                         pci_unmap_page(tp->pdev,
4486                                        pci_unmap_addr(txp, mapping),
4487                                        skb_shinfo(skb)->frags[j].size,
4488                                        PCI_DMA_TODEVICE);
4489                         i++;
4490                 }
4491
4492                 dev_kfree_skb_any(skb);
4493         }
4494 }
4495
4496 /* Initialize tx/rx rings for packet processing.
4497  *
4498  * The chip has been shut down and the driver detached from
4499  * the networking, so no interrupts or new tx packets will
4500  * end up in the driver.  tp->{tx,}lock are held and thus
4501  * we may not sleep.
4502  */
4503 static int tg3_init_rings(struct tg3 *tp)
4504 {
4505         u32 i;
4506
4507         /* Free up all the SKBs. */
4508         tg3_free_rings(tp);
4509
4510         /* Zero out all descriptors. */
4511         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4512         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4513         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4514         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4515
        /* 5780-class chips use larger standard-ring buffers for jumbo
         * MTUs (they have no separate jumbo ring).
         */
4516         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4517         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4518             (tp->dev->mtu > ETH_DATA_LEN))
4519                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4520
4521         /* Initialize invariants of the rings, we only set this
4522          * stuff once.  This works because the card does not
4523          * write into the rx buffer posting rings.
4524          */
4525         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4526                 struct tg3_rx_buffer_desc *rxd;
4527
4528                 rxd = &tp->rx_std[i];
4529                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4530                         << RXD_LEN_SHIFT;
4531                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4532                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4533                                (i << RXD_OPAQUE_INDEX_SHIFT));
4534         }
4535
4536         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4537                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4538                         struct tg3_rx_buffer_desc *rxd;
4539
4540                         rxd = &tp->rx_jumbo[i];
4541                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4542                                 << RXD_LEN_SHIFT;
4543                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4544                                 RXD_FLAG_JUMBO;
4545                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4546                                (i << RXD_OPAQUE_INDEX_SHIFT));
4547                 }
4548         }
4549
4550         /* Now allocate fresh SKBs for each rx ring. */
4551         for (i = 0; i < tp->rx_pending; i++) {
                /* Partial allocation is tolerated (ring is shrunk), but
                 * zero buffers is a hard failure.
                 */
4552                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4553                         printk(KERN_WARNING PFX
4554                                "%s: Using a smaller RX standard ring, "
4555                                "only %d out of %d buffers were allocated "
4556                                "successfully.\n",
4557                                tp->dev->name, i, tp->rx_pending);
4558                         if (i == 0)
4559                                 return -ENOMEM;
4560                         tp->rx_pending = i;
4561                         break;
4562                 }
4563         }
4564
4565         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4566                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4567                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4568                                              -1, i) < 0) {
4569                                 printk(KERN_WARNING PFX
4570                                        "%s: Using a smaller RX jumbo ring, "
4571                                        "only %d out of %d buffers were "
4572                                        "allocated successfully.\n",
4573                                        tp->dev->name, i, tp->rx_jumbo_pending);
4574                                 if (i == 0) {
4575                                         tg3_free_rings(tp);
4576                                         return -ENOMEM;
4577                                 }
4578                                 tp->rx_jumbo_pending = i;
4579                                 break;
4580                         }
4581                 }
4582         }
4583         return 0;
4584 }
4585
4586 /*
4587  * Must not be invoked with interrupt sources disabled and
4588  * the hardware shutdown down.
4589  */
4590 static void tg3_free_consistent(struct tg3 *tp)
4591 {
        /* rx_jumbo_buffers and tx_buffers live inside this single
         * allocation (see tg3_alloc_consistent), so one kfree releases
         * all three bookkeeping arrays.
         */
4592         kfree(tp->rx_std_buffers);
4593         tp->rx_std_buffers = NULL;
        /* Each DMA region is freed and its pointer cleared so a repeat
         * call is harmless.
         */
4594         if (tp->rx_std) {
4595                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4596                                     tp->rx_std, tp->rx_std_mapping);
4597                 tp->rx_std = NULL;
4598         }
4599         if (tp->rx_jumbo) {
4600                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4601                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4602                 tp->rx_jumbo = NULL;
4603         }
4604         if (tp->rx_rcb) {
4605                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4606                                     tp->rx_rcb, tp->rx_rcb_mapping);
4607                 tp->rx_rcb = NULL;
4608         }
4609         if (tp->tx_ring) {
4610                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4611                         tp->tx_ring, tp->tx_desc_mapping);
4612                 tp->tx_ring = NULL;
4613         }
4614         if (tp->hw_status) {
4615                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4616                                     tp->hw_status, tp->status_mapping);
4617                 tp->hw_status = NULL;
4618         }
4619         if (tp->hw_stats) {
4620                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4621                                     tp->hw_stats, tp->stats_mapping);
4622                 tp->hw_stats = NULL;
4623         }
4624 }
4625
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 *
 * Allocates all host memory the driver needs:
 *  - one zeroed kmalloc buffer carved into the std RX, jumbo RX and
 *    TX ring-info arrays, laid out back to back;
 *  - DMA-coherent blocks for the RX std/jumbo producer rings, the RX
 *    return ring, the TX ring, the hardware status block, and the
 *    hardware statistics block.
 * On any failure everything already obtained is released via
 * tg3_free_consistent().  Returns 0 on success, -ENOMEM on failure.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	/* Carve the jumbo and TX bookkeeping arrays out of the single
	 * allocation above; tg3_free_consistent() frees only the base
	 * rx_std_buffers pointer.
	 */
	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent() memory is not guaranteed zeroed;
	 * the chip and driver both read these blocks before the first
	 * hardware update, so clear them explicitly.
	 */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4687
4688 #define MAX_WAIT_CNT 1000
4689
4690 /* To stop a block, clear the enable bit and poll till it
4691  * clears.  tp->lock is held.
4692  */
4693 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4694 {
4695         unsigned int i;
4696         u32 val;
4697
4698         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4699                 switch (ofs) {
4700                 case RCVLSC_MODE:
4701                 case DMAC_MODE:
4702                 case MBFREE_MODE:
4703                 case BUFMGR_MODE:
4704                 case MEMARB_MODE:
4705                         /* We can't enable/disable these bits of the
4706                          * 5705/5750, just say success.
4707                          */
4708                         return 0;
4709
4710                 default:
4711                         break;
4712                 };
4713         }
4714
4715         val = tr32(ofs);
4716         val &= ~enable_bit;
4717         tw32_f(ofs, val);
4718
4719         for (i = 0; i < MAX_WAIT_CNT; i++) {
4720                 udelay(100);
4721                 val = tr32(ofs);
4722                 if ((val & enable_bit) == 0)
4723                         break;
4724         }
4725
4726         if (i == MAX_WAIT_CNT && !silent) {
4727                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4728                        "ofs=%lx enable_bit=%x\n",
4729                        ofs, enable_bit);
4730                 return -ENODEV;
4731         }
4732
4733         return 0;
4734 }
4735
/* tp->lock is held.
 *
 * Quiesce the hardware: disable interrupts and the RX MAC, stop every
 * receive/send/DMA state machine, wait for the TX MAC to drain, reset
 * the FTQ, and clear the host status and statistics blocks.  Errors
 * are OR-ed into @err so every block is attempted even after one
 * fails; returns 0 on success or a negative value if any block timed
 * out.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames before tearing down the RX path. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks, upstream to downstream. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the TX MAC and poll (up to 100ms) for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset all FTQ queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host-visible status/stats so stale data is not read
	 * after the hardware stops updating them.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4798
4799 /* tp->lock is held. */
4800 static int tg3_nvram_lock(struct tg3 *tp)
4801 {
4802         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4803                 int i;
4804
4805                 if (tp->nvram_lock_cnt == 0) {
4806                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4807                         for (i = 0; i < 8000; i++) {
4808                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4809                                         break;
4810                                 udelay(20);
4811                         }
4812                         if (i == 8000) {
4813                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4814                                 return -ENODEV;
4815                         }
4816                 }
4817                 tp->nvram_lock_cnt++;
4818         }
4819         return 0;
4820 }
4821
4822 /* tp->lock is held. */
4823 static void tg3_nvram_unlock(struct tg3 *tp)
4824 {
4825         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4826                 if (tp->nvram_lock_cnt > 0)
4827                         tp->nvram_lock_cnt--;
4828                 if (tp->nvram_lock_cnt == 0)
4829                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4830         }
4831 }
4832
4833 /* tp->lock is held. */
4834 static void tg3_enable_nvram_access(struct tg3 *tp)
4835 {
4836         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4837             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4838                 u32 nvaccess = tr32(NVRAM_ACCESS);
4839
4840                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4841         }
4842 }
4843
4844 /* tp->lock is held. */
4845 static void tg3_disable_nvram_access(struct tg3 *tp)
4846 {
4847         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4848             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4849                 u32 nvaccess = tr32(NVRAM_ACCESS);
4850
4851                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4852         }
4853 }
4854
/* Post @event to the APE management firmware.
 *
 * Silently returns if the APE shared-memory signature or firmware
 * ready status does not check out.  Otherwise polls for up to ~1ms
 * for any previously posted event to drain, writes the new event
 * under the APE memory lock, and rings the APE doorbell.  On timeout
 * the event is dropped without notice.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Verify the shared segment looks like live APE firmware. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot is free: post our event (with PENDING set) while
		 * still holding the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* apedata still holds the pre-write status: !PENDING here means
	 * the event was posted above, so notify the APE.
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4890
4891 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4892 {
4893         u32 event;
4894         u32 apedata;
4895
4896         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4897                 return;
4898
4899         switch (kind) {
4900                 case RESET_KIND_INIT:
4901                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4902                                         APE_HOST_SEG_SIG_MAGIC);
4903                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4904                                         APE_HOST_SEG_LEN_MAGIC);
4905                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4906                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4907                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4908                                         APE_HOST_DRIVER_ID_MAGIC);
4909                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4910                                         APE_HOST_BEHAV_NO_PHYLOCK);
4911
4912                         event = APE_EVENT_STATUS_STATE_START;
4913                         break;
4914                 case RESET_KIND_SHUTDOWN:
4915                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4916                         break;
4917                 case RESET_KIND_SUSPEND:
4918                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4919                         break;
4920                 default:
4921                         return;
4922         }
4923
4924         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4925
4926         tg3_ape_send_event(tp, event);
4927 }
4928
4929 /* tp->lock is held. */
4930 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4931 {
4932         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4933                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4934
4935         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4936                 switch (kind) {
4937                 case RESET_KIND_INIT:
4938                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4939                                       DRV_STATE_START);
4940                         break;
4941
4942                 case RESET_KIND_SHUTDOWN:
4943                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4944                                       DRV_STATE_UNLOAD);
4945                         break;
4946
4947                 case RESET_KIND_SUSPEND:
4948                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4949                                       DRV_STATE_SUSPEND);
4950                         break;
4951
4952                 default:
4953                         break;
4954                 };
4955         }
4956
4957         if (kind == RESET_KIND_INIT ||
4958             kind == RESET_KIND_SUSPEND)
4959                 tg3_ape_driver_state_change(tp, kind);
4960 }
4961
4962 /* tp->lock is held. */
4963 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4964 {
4965         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4966                 switch (kind) {
4967                 case RESET_KIND_INIT:
4968                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4969                                       DRV_STATE_START_DONE);
4970                         break;
4971
4972                 case RESET_KIND_SHUTDOWN:
4973                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4974                                       DRV_STATE_UNLOAD_DONE);
4975                         break;
4976
4977                 default:
4978                         break;
4979                 };
4980         }
4981
4982         if (kind == RESET_KIND_SHUTDOWN)
4983                 tg3_ape_driver_state_change(tp, kind);
4984 }
4985
4986 /* tp->lock is held. */
4987 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4988 {
4989         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4990                 switch (kind) {
4991                 case RESET_KIND_INIT:
4992                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4993                                       DRV_STATE_START);
4994                         break;
4995
4996                 case RESET_KIND_SHUTDOWN:
4997                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4998                                       DRV_STATE_UNLOAD);
4999                         break;
5000
5001                 case RESET_KIND_SUSPEND:
5002                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5003                                       DRV_STATE_SUSPEND);
5004                         break;
5005
5006                 default:
5007                         break;
5008                 };
5009         }
5010 }
5011
/* Wait for the chip's boot firmware to finish initializing.
 *
 * 5906 parts are polled via the VCPU status register (up to 20ms);
 * all others via the firmware mailbox, which the bootcode sets to
 * ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 when done (up to ~1s).
 *
 * Returns 0 on success or when no firmware is fitted (reported once),
 * -ENODEV only on a 5906 VCPU timeout.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5050
/* Save the PCI command register before chip reset; the GRC core-clock
 * reset can clear the memory-enable bit, and tg3_restore_pci_state()
 * writes this saved value back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5056
/* Restore PCI state after chip reset.
 *
 * Re-enables indirect register access, rebuilds TG3PCI_PCISTATE,
 * restores the PCI command register saved by tg3_save_pci_state(),
 * restores cache-line size and latency timer on non-PCIe devices,
 * clears the PCI-X relaxed-ordering bit, and re-enables MSI on
 * 5780-class chips whose reset clears the MSI enable bit.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved before the reset. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Cache-line size / latency timer are only restored for
	 * conventional PCI and PCI-X devices; skipped on PCI Express.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5115
5116 static void tg3_stop_fw(struct tg3 *);
5117
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip and bring it back
 * to a minimally functional state: save/restore PCI config state
 * around the reset, re-enable the memory arbiter, restore GRC mode
 * and the MAC port mode, wait for the bootcode, and re-probe the
 * ASF configuration.  Returns 0 on success or the negative errno
 * from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	/* Clear the fastboot program counter so the reset runs the
	 * full bootcode on chips that support fastboot.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe workaround registers/bits for 5750-era silicon;
		 * no symbolic names exist for them in tg3.h.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Flag a driver reset to the VCPU and let it run. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter; 5780-class parts keep their
	 * additional mode bits across the reset.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode matching the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for the bootcode to finish initializing. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5306
/* tp->lock is held.
 *
 * Ask the ASF firmware running on the RX CPU to pause.  Only done
 * when ASF is enabled and the APE is not managing the device.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		/* Post the PAUSE command in the firmware command mailbox
		 * and raise the RX CPU event.  NOTE(review): bit 14
		 * appears to be the driver->firmware event strobe; it has
		 * no symbolic name in tg3.h -- confirm against the
		 * register spec.
		 */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5328
/* tp->lock is held.
 *
 * Full stop of the device: pause the firmware, signal the impending
 * reset (@kind: RESET_KIND_*) to ASF/APE, quiesce the hardware, reset
 * the chip, then post the legacy and completion signatures.  Returns
 * the result of tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5349
5350 #define TG3_FW_RELEASE_MAJOR    0x0
5351 #define TG3_FW_RELASE_MINOR     0x0
5352 #define TG3_FW_RELEASE_FIX      0x0
5353 #define TG3_FW_START_ADDR       0x08000000
5354 #define TG3_FW_TEXT_ADDR        0x08000000
5355 #define TG3_FW_TEXT_LEN         0x9c0
5356 #define TG3_FW_RODATA_ADDR      0x080009c0
5357 #define TG3_FW_RODATA_LEN       0x60
5358 #define TG3_FW_DATA_ADDR        0x08000a40
5359 #define TG3_FW_DATA_LEN         0x20
5360 #define TG3_FW_SBSS_ADDR        0x08000a60
5361 #define TG3_FW_SBSS_LEN         0xc
5362 #define TG3_FW_BSS_ADDR         0x08000a70
5363 #define TG3_FW_BSS_LEN          0x10
5364
5365 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5366         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5367         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5368         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5369         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5370         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5371         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5372         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5373         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5374         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5375         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5376         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5377         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5378         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5379         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5380         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5381         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5382         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5383         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5384         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5385         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5386         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5387         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5388         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5389         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5390         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5391         0, 0, 0, 0, 0, 0,
5392         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5393         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5394         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5395         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5396         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5397         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5398         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5399         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5400         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5401         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5402         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5403         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5404         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5405         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5406         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5407         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5408         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5409         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5410         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5411         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5412         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5413         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5414         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5415         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5416         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5417         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5418         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5419         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5420         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5421         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5422         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5423         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5424         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5425         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5426         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5427         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5428         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5429         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5430         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5431         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5432         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5433         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5434         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5435         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5436         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5437         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5438         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5439         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5440         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5441         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5442         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5443         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5444         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5445         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5446         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5447         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5448         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5449         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5450         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5451         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5452         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5453         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5454         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5455         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5456         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5457 };
5458
/* Firmware read-only data section, loaded at TG3_FW_RODATA_ADDR by
 * tg3_load_5701_a0_firmware_fix().  The words are ASCII text (e.g.
 * 0x35373031 = "5701"), presumably message strings referenced by the
 * firmware image — TODO confirm against the firmware source.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5466
#if 0 /* All zeros, don't eat up space with it. */
/* Firmware .data section.  Since it is all zeros, the driver compiles it
 * out and tg3_load_5701_a0_firmware_fix() passes a NULL data pointer
 * instead, which makes the loader write zeros for the whole section.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5473
5474 #define RX_CPU_SCRATCH_BASE     0x30000
5475 #define RX_CPU_SCRATCH_SIZE     0x04000
5476 #define TX_CPU_SCRATCH_BASE     0x34000
5477 #define TX_CPU_SCRATCH_SIZE     0x04000
5478
/* tp->lock is held. */
/* Halt the on-chip RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never reports
 * the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ chips must never be asked to halt a TX CPU; that is a
	 * driver bug (see the same guard in tg3_load_firmware_cpu()).
	 */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 halts its CPU through the VCPU extended-control
		 * register rather than CPU_MODE; no polling needed.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly clear CPU_STATE and request a halt until the
		 * CPU acknowledges it in CPU_MODE, bounded at 10000 tries.
		 */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU only: one final halt write — tw32_f presumably
		 * flushes the posted write (note the TX path omits this;
		 * TODO confirm intent against hardware errata).
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		/* TX CPU: same poll loop, without the trailing flush. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* i == 10000 means the loop above never saw CPU_MODE_HALT. */
	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5526
/* Describes a firmware image to load into an on-chip CPU: three sections
 * (text, rodata, data), each with a load address, a byte length, and a
 * pointer to the image words.  A NULL data pointer means the section is
 * all zeros (the loader writes zeros in that case — see
 * tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5538
5539 /* tp->lock is held. */
5540 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5541                                  int cpu_scratch_size, struct fw_info *info)
5542 {
5543         int err, lock_err, i;
5544         void (*write_op)(struct tg3 *, u32, u32);
5545
5546         if (cpu_base == TX_CPU_BASE &&
5547             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5548                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5549                        "TX cpu firmware on %s which is 5705.\n",
5550                        tp->dev->name);
5551                 return -EINVAL;
5552         }
5553
5554         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5555                 write_op = tg3_write_mem;
5556         else
5557                 write_op = tg3_write_indirect_reg32;
5558
5559         /* It is possible that bootcode is still loading at this point.
5560          * Get the nvram lock first before halting the cpu.
5561          */
5562         lock_err = tg3_nvram_lock(tp);
5563         err = tg3_halt_cpu(tp, cpu_base);
5564         if (!lock_err)
5565                 tg3_nvram_unlock(tp);
5566         if (err)
5567                 goto out;
5568
5569         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5570                 write_op(tp, cpu_scratch_base + i, 0);
5571         tw32(cpu_base + CPU_STATE, 0xffffffff);
5572         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5573         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5574                 write_op(tp, (cpu_scratch_base +
5575                               (info->text_base & 0xffff) +
5576                               (i * sizeof(u32))),
5577                          (info->text_data ?
5578                           info->text_data[i] : 0));
5579         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5580                 write_op(tp, (cpu_scratch_base +
5581                               (info->rodata_base & 0xffff) +
5582                               (i * sizeof(u32))),
5583                          (info->rodata_data ?
5584                           info->rodata_data[i] : 0));
5585         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5586                 write_op(tp, (cpu_scratch_base +
5587                               (info->data_base & 0xffff) +
5588                               (i * sizeof(u32))),
5589                          (info->data_data ?
5590                           info->data_data[i] : 0));
5591
5592         err = 0;
5593
5594 out:
5595         return err;
5596 }
5597
5598 /* tp->lock is held. */
5599 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5600 {
5601         struct fw_info info;
5602         int err, i;
5603
5604         info.text_base = TG3_FW_TEXT_ADDR;
5605         info.text_len = TG3_FW_TEXT_LEN;
5606         info.text_data = &tg3FwText[0];
5607         info.rodata_base = TG3_FW_RODATA_ADDR;
5608         info.rodata_len = TG3_FW_RODATA_LEN;
5609         info.rodata_data = &tg3FwRodata[0];
5610         info.data_base = TG3_FW_DATA_ADDR;
5611         info.data_len = TG3_FW_DATA_LEN;
5612         info.data_data = NULL;
5613
5614         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5615                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5616                                     &info);
5617         if (err)
5618                 return err;
5619
5620         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5621                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5622                                     &info);
5623         if (err)
5624                 return err;
5625
5626         /* Now startup only the RX cpu. */
5627         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5628         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5629
5630         for (i = 0; i < 5; i++) {
5631                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5632                         break;
5633                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5634                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5635                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5636                 udelay(1000);
5637         }
5638         if (i >= 5) {
5639                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5640                        "to set RX CPU PC, is %08x should be %08x\n",
5641                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5642                        TG3_FW_TEXT_ADDR);
5643                 return -ENODEV;
5644         }
5645         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5646         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5647
5648         return 0;
5649 }
5650
5651
5652 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5653 #define TG3_TSO_FW_RELASE_MINOR         0x6
5654 #define TG3_TSO_FW_RELEASE_FIX          0x0
5655 #define TG3_TSO_FW_START_ADDR           0x08000000
5656 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5657 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5658 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5659 #define TG3_TSO_FW_RODATA_LEN           0x60
5660 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5661 #define TG3_TSO_FW_DATA_LEN             0x30
5662 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5663 #define TG3_TSO_FW_SBSS_LEN             0x2c
5664 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5665 #define TG3_TSO_FW_BSS_LEN              0x894
5666
5667 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5668         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5669         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5670         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5671         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5672         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5673         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5674         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5675         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5676         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5677         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5678         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5679         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5680         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5681         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5682         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5683         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5684         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5685         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5686         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5687         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5688         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5689         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5690         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5691         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5692         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5693         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5694         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5695         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5696         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5697         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5698         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5699         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5700         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5701         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5702         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5703         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5704         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5705         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5706         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5707         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5708         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5709         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5710         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5711         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5712         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5713         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5714         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5715         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5716         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5717         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5718         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5719         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5720         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5721         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5722         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5723         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5724         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5725         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5726         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5727         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5728         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5729         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5730         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5731         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5732         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5733         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5734         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5735         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5736         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5737         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5738         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5739         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5740         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5741         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5742         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5743         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5744         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5745         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5746         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5747         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5748         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5749         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5750         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5751         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5752         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5753         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5754         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5755         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5756         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5757         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5758         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5759         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5760         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5761         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5762         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5763         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5764         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5765         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5766         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5767         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5768         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5769         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5770         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5771         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5772         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5773         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5774         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5775         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5776         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5777         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5778         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5779         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5780         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5781         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5782         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5783         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5784         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5785         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5786         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5787         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5788         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5789         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5790         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5791         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5792         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5793         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5794         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5795         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5796         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5797         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5798         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5799         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5800         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5801         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5802         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5803         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5804         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5805         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5806         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5807         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5808         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5809         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5810         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5811         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5812         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5813         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5814         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5815         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5816         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5817         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5818         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5819         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5820         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5821         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5822         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5823         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5824         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5825         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5826         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5827         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5828         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5829         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5830         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5831         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5832         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5833         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5834         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5835         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5836         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5837         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5838         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5839         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5840         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5841         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5842         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5843         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5844         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5845         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5846         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5847         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5848         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5849         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5850         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5851         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5852         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5853         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5854         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5855         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5856         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5857         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5858         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5859         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5860         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5861         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5862         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5863         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5864         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5865         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5866         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5867         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5868         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5869         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5870         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5871         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5872         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5873         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5874         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5875         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5876         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5877         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5878         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5879         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5880         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5881         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5882         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5883         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5884         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5885         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5886         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5887         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5888         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5889         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5890         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5891         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5892         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5893         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5894         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5895         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5896         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5897         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5898         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5899         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5900         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5901         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5902         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5903         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5904         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5905         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5906         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5907         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5908         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5909         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5910         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5911         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5912         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5913         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5914         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5915         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5916         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5917         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5918         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5919         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5920         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5921         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5922         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5923         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5924         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5925         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5926         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5927         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5928         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5929         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5930         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5931         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5932         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5933         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5934         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5935         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5936         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5937         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5938         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5939         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5940         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5941         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5942         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5943         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5944         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5945         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5946         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5947         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5948         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5949         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5950         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5951         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5952 };
5953
/* Read-only data segment of the standard TSO firmware image.  The words
 * appear to be ASCII tags used by the firmware's own diagnostics
 * ("MainCpuB", "MainCpuA", "stkoffld", "SwEvent0", "fatalErr", ...).
 * Loaded to NIC SRAM at TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5961
/* Initialized data segment of the standard TSO firmware image; the hex
 * decodes to the ASCII version tag "stkoffld_v1.6.0".  Loaded to NIC
 * SRAM at TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5967
5968 /* 5705 needs a special version of the TSO firmware.  */
5969 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5970 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5971 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5972 #define TG3_TSO5_FW_START_ADDR          0x00010000
5973 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5974 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5975 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5976 #define TG3_TSO5_FW_RODATA_LEN          0x50
5977 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5978 #define TG3_TSO5_FW_DATA_LEN            0x20
5979 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5980 #define TG3_TSO5_FW_SBSS_LEN            0x28
5981 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5982 #define TG3_TSO5_FW_BSS_LEN             0x88
5983
/* Instruction (text) segment of the 5705-specific TSO firmware image,
 * presumably MIPS machine code for the on-chip RX CPU (opcode patterns
 * such as 0x27bdffe0/0x03e00008 match MIPS addiu-sp/jr-ra idioms —
 * derived from unpublished Broadcom sources, see the file header).
 * Sized to TG3_TSO5_FW_TEXT_LEN plus one padding word; loaded to NIC
 * SRAM at TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6142
/* Read-only data segment of the 5705-specific TSO firmware image;
 * ASCII diagnostic tags ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 * Loaded to NIC SRAM at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6149
/* Initialized data segment of the 5705-specific TSO firmware image; the
 * hex decodes to the ASCII version tag "stkoffld_v1.2.0".  Loaded to NIC
 * SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6154
6155 /* tp->lock is held. */
6156 static int tg3_load_tso_firmware(struct tg3 *tp)
6157 {
6158         struct fw_info info;
6159         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6160         int err, i;
6161
6162         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6163                 return 0;
6164
6165         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6166                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6167                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6168                 info.text_data = &tg3Tso5FwText[0];
6169                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6170                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6171                 info.rodata_data = &tg3Tso5FwRodata[0];
6172                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6173                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6174                 info.data_data = &tg3Tso5FwData[0];
6175                 cpu_base = RX_CPU_BASE;
6176                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6177                 cpu_scratch_size = (info.text_len +
6178                                     info.rodata_len +
6179                                     info.data_len +
6180                                     TG3_TSO5_FW_SBSS_LEN +
6181                                     TG3_TSO5_FW_BSS_LEN);
6182         } else {
6183                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6184                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6185                 info.text_data = &tg3TsoFwText[0];
6186                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6187                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6188                 info.rodata_data = &tg3TsoFwRodata[0];
6189                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6190                 info.data_len = TG3_TSO_FW_DATA_LEN;
6191                 info.data_data = &tg3TsoFwData[0];
6192                 cpu_base = TX_CPU_BASE;
6193                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6194                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6195         }
6196
6197         err = tg3_load_firmware_cpu(tp, cpu_base,
6198                                     cpu_scratch_base, cpu_scratch_size,
6199                                     &info);
6200         if (err)
6201                 return err;
6202
6203         /* Now startup the cpu. */
6204         tw32(cpu_base + CPU_STATE, 0xffffffff);
6205         tw32_f(cpu_base + CPU_PC,    info.text_base);
6206
6207         for (i = 0; i < 5; i++) {
6208                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6209                         break;
6210                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6211                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6212                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6213                 udelay(1000);
6214         }
6215         if (i >= 5) {
6216                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6217                        "to set CPU PC, is %08x should be %08x\n",
6218                        tp->dev->name, tr32(cpu_base + CPU_PC),
6219                        info.text_base);
6220                 return -ENODEV;
6221         }
6222         tw32(cpu_base + CPU_STATE, 0xffffffff);
6223         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6224         return 0;
6225 }
6226
6227
6228 /* tp->lock is held. */
6229 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6230 {
6231         u32 addr_high, addr_low;
6232         int i;
6233
6234         addr_high = ((tp->dev->dev_addr[0] << 8) |
6235                      tp->dev->dev_addr[1]);
6236         addr_low = ((tp->dev->dev_addr[2] << 24) |
6237                     (tp->dev->dev_addr[3] << 16) |
6238                     (tp->dev->dev_addr[4] <<  8) |
6239                     (tp->dev->dev_addr[5] <<  0));
6240         for (i = 0; i < 4; i++) {
6241                 if (i == 1 && skip_mac_1)
6242                         continue;
6243                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6244                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6245         }
6246
6247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6249                 for (i = 0; i < 12; i++) {
6250                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6251                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6252                 }
6253         }
6254
6255         addr_high = (tp->dev->dev_addr[0] +
6256                      tp->dev->dev_addr[1] +
6257                      tp->dev->dev_addr[2] +
6258                      tp->dev->dev_addr[3] +
6259                      tp->dev->dev_addr[4] +
6260                      tp->dev->dev_addr[5]) &
6261                 TX_BACKOFF_SEED_MASK;
6262         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6263 }
6264
6265 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6266 {
6267         struct tg3 *tp = netdev_priv(dev);
6268         struct sockaddr *addr = p;
6269         int err = 0, skip_mac_1 = 0;
6270
6271         if (!is_valid_ether_addr(addr->sa_data))
6272                 return -EINVAL;
6273
6274         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6275
6276         if (!netif_running(dev))
6277                 return 0;
6278
6279         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6280                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6281
6282                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6283                 addr0_low = tr32(MAC_ADDR_0_LOW);
6284                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6285                 addr1_low = tr32(MAC_ADDR_1_LOW);
6286
6287                 /* Skip MAC addr 1 if ASF is using it. */
6288                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6289                     !(addr1_high == 0 && addr1_low == 0))
6290                         skip_mac_1 = 1;
6291         }
6292         spin_lock_bh(&tp->lock);
6293         __tg3_set_mac_addr(tp, skip_mac_1);
6294         spin_unlock_bh(&tp->lock);
6295
6296         return err;
6297 }
6298
6299 /* tp->lock is held. */
6300 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6301                            dma_addr_t mapping, u32 maxlen_flags,
6302                            u32 nic_addr)
6303 {
6304         tg3_write_mem(tp,
6305                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6306                       ((u64) mapping >> 32));
6307         tg3_write_mem(tp,
6308                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6309                       ((u64) mapping & 0xffffffff));
6310         tg3_write_mem(tp,
6311                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6312                        maxlen_flags);
6313
6314         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6315                 tg3_write_mem(tp,
6316                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6317                               nic_addr);
6318 }
6319
6320 static void __tg3_set_rx_mode(struct net_device *);
6321 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6322 {
6323         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6324         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6325         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6326         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6327         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6328                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6329                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6330         }
6331         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6332         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6333         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6334                 u32 val = ec->stats_block_coalesce_usecs;
6335
6336                 if (!netif_carrier_ok(tp->dev))
6337                         val = 0;
6338
6339                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6340         }
6341 }
6342
6343 /* tp->lock is held. */
6344 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6345 {
6346         u32 val, rdmac_mode;
6347         int i, err, limit;
6348
6349         tg3_disable_ints(tp);
6350
6351         tg3_stop_fw(tp);
6352
6353         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6354
6355         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6356                 tg3_abort_hw(tp, 1);
6357         }
6358
6359         if (reset_phy)
6360                 tg3_phy_reset(tp);
6361
6362         err = tg3_chip_reset(tp);
6363         if (err)
6364                 return err;
6365
6366         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6367
6368         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6369                 val = tr32(TG3_CPMU_CTRL);
6370                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6371                 tw32(TG3_CPMU_CTRL, val);
6372         }
6373
6374         /* This works around an issue with Athlon chipsets on
6375          * B3 tigon3 silicon.  This bit has no effect on any
6376          * other revision.  But do not set this on PCI Express
6377          * chips and don't even touch the clocks if the CPMU is present.
6378          */
6379         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6380                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6381                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6382                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6383         }
6384
6385         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6386             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6387                 val = tr32(TG3PCI_PCISTATE);
6388                 val |= PCISTATE_RETRY_SAME_DMA;
6389                 tw32(TG3PCI_PCISTATE, val);
6390         }
6391
6392         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6393                 /* Allow reads and writes to the
6394                  * APE register and memory space.
6395                  */
6396                 val = tr32(TG3PCI_PCISTATE);
6397                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6398                        PCISTATE_ALLOW_APE_SHMEM_WR;
6399                 tw32(TG3PCI_PCISTATE, val);
6400         }
6401
6402         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6403                 /* Enable some hw fixes.  */
6404                 val = tr32(TG3PCI_MSI_DATA);
6405                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6406                 tw32(TG3PCI_MSI_DATA, val);
6407         }
6408
6409         /* Descriptor ring init may make accesses to the
6410          * NIC SRAM area to setup the TX descriptors, so we
6411          * can only do this after the hardware has been
6412          * successfully reset.
6413          */
6414         err = tg3_init_rings(tp);
6415         if (err)
6416                 return err;
6417
6418         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6419             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6420                 /* This value is determined during the probe time DMA
6421                  * engine test, tg3_test_dma.
6422                  */
6423                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6424         }
6425
6426         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6427                           GRC_MODE_4X_NIC_SEND_RINGS |
6428                           GRC_MODE_NO_TX_PHDR_CSUM |
6429                           GRC_MODE_NO_RX_PHDR_CSUM);
6430         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6431
6432         /* Pseudo-header checksum is done by hardware logic and not
6433          * the offload processers, so make the chip do the pseudo-
6434          * header checksums on receive.  For transmit it is more
6435          * convenient to do the pseudo-header checksum in software
6436          * as Linux does that on transmit for us in all cases.
6437          */
6438         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6439
6440         tw32(GRC_MODE,
6441              tp->grc_mode |
6442              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6443
6444         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6445         val = tr32(GRC_MISC_CFG);
6446         val &= ~0xff;
6447         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6448         tw32(GRC_MISC_CFG, val);
6449
6450         /* Initialize MBUF/DESC pool. */
6451         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6452                 /* Do nothing.  */
6453         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6454                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6455                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6456                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6457                 else
6458                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6459                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6460                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6461         }
6462         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6463                 int fw_len;
6464
6465                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6466                           TG3_TSO5_FW_RODATA_LEN +
6467                           TG3_TSO5_FW_DATA_LEN +
6468                           TG3_TSO5_FW_SBSS_LEN +
6469                           TG3_TSO5_FW_BSS_LEN);
6470                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6471                 tw32(BUFMGR_MB_POOL_ADDR,
6472                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6473                 tw32(BUFMGR_MB_POOL_SIZE,
6474                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6475         }
6476
6477         if (tp->dev->mtu <= ETH_DATA_LEN) {
6478                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6479                      tp->bufmgr_config.mbuf_read_dma_low_water);
6480                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6481                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6482                 tw32(BUFMGR_MB_HIGH_WATER,
6483                      tp->bufmgr_config.mbuf_high_water);
6484         } else {
6485                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6486                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6487                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6488                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6489                 tw32(BUFMGR_MB_HIGH_WATER,
6490                      tp->bufmgr_config.mbuf_high_water_jumbo);
6491         }
6492         tw32(BUFMGR_DMA_LOW_WATER,
6493              tp->bufmgr_config.dma_low_water);
6494         tw32(BUFMGR_DMA_HIGH_WATER,
6495              tp->bufmgr_config.dma_high_water);
6496
6497         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6498         for (i = 0; i < 2000; i++) {
6499                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6500                         break;
6501                 udelay(10);
6502         }
6503         if (i >= 2000) {
6504                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6505                        tp->dev->name);
6506                 return -ENODEV;
6507         }
6508
6509         /* Setup replenish threshold. */
6510         val = tp->rx_pending / 8;
6511         if (val == 0)
6512                 val = 1;
6513         else if (val > tp->rx_std_max_post)
6514                 val = tp->rx_std_max_post;
6515         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6516                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6517                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6518
6519                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6520                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6521         }
6522
6523         tw32(RCVBDI_STD_THRESH, val);
6524
6525         /* Initialize TG3_BDINFO's at:
6526          *  RCVDBDI_STD_BD:     standard eth size rx ring
6527          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6528          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6529          *
6530          * like so:
6531          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6532          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6533          *                              ring attribute flags
6534          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6535          *
6536          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6537          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6538          *
6539          * The size of each ring is fixed in the firmware, but the location is
6540          * configurable.
6541          */
6542         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6543              ((u64) tp->rx_std_mapping >> 32));
6544         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6545              ((u64) tp->rx_std_mapping & 0xffffffff));
6546         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6547              NIC_SRAM_RX_BUFFER_DESC);
6548
6549         /* Don't even try to program the JUMBO/MINI buffer descriptor
6550          * configs on 5705.
6551          */
6552         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6553                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6554                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6555         } else {
6556                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6557                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6558
6559                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6560                      BDINFO_FLAGS_DISABLED);
6561
6562                 /* Setup replenish threshold. */
6563                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6564
6565                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6566                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6567                              ((u64) tp->rx_jumbo_mapping >> 32));
6568                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6569                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6570                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6571                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6572                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6573                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6574                 } else {
6575                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6576                              BDINFO_FLAGS_DISABLED);
6577                 }
6578
6579         }
6580
6581         /* There is only one send ring on 5705/5750, no need to explicitly
6582          * disable the others.
6583          */
6584         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6585                 /* Clear out send RCB ring in SRAM. */
6586                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6587                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6588                                       BDINFO_FLAGS_DISABLED);
6589         }
6590
6591         tp->tx_prod = 0;
6592         tp->tx_cons = 0;
6593         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6594         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6595
6596         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6597                        tp->tx_desc_mapping,
6598                        (TG3_TX_RING_SIZE <<
6599                         BDINFO_FLAGS_MAXLEN_SHIFT),
6600                        NIC_SRAM_TX_BUFFER_DESC);
6601
6602         /* There is only one receive return ring on 5705/5750, no need
6603          * to explicitly disable the others.
6604          */
6605         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6606                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6607                      i += TG3_BDINFO_SIZE) {
6608                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6609                                       BDINFO_FLAGS_DISABLED);
6610                 }
6611         }
6612
6613         tp->rx_rcb_ptr = 0;
6614         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6615
6616         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6617                        tp->rx_rcb_mapping,
6618                        (TG3_RX_RCB_RING_SIZE(tp) <<
6619                         BDINFO_FLAGS_MAXLEN_SHIFT),
6620                        0);
6621
6622         tp->rx_std_ptr = tp->rx_pending;
6623         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6624                      tp->rx_std_ptr);
6625
6626         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6627                                                 tp->rx_jumbo_pending : 0;
6628         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6629                      tp->rx_jumbo_ptr);
6630
6631         /* Initialize MAC address and backoff seed. */
6632         __tg3_set_mac_addr(tp, 0);
6633
6634         /* MTU + ethernet header + FCS + optional VLAN tag */
6635         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6636
6637         /* The slot time is changed by tg3_setup_phy if we
6638          * run at gigabit with half duplex.
6639          */
6640         tw32(MAC_TX_LENGTHS,
6641              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6642              (6 << TX_LENGTHS_IPG_SHIFT) |
6643              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6644
6645         /* Receive rules. */
6646         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6647         tw32(RCVLPC_CONFIG, 0x0181);
6648
6649         /* Calculate RDMAC_MODE setting early, we need it to determine
6650          * the RCVLPC_STATE_ENABLE mask.
6651          */
6652         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6653                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6654                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6655                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6656                       RDMAC_MODE_LNGREAD_ENAB);
6657
6658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6659                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6660                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6661                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6662
6663         /* If statement applies to 5705 and 5750 PCI devices only */
6664         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6665              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6666             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6667                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6668                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6669                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6670                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6671                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6672                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6673                 }
6674         }
6675
6676         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6677                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6678
6679         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6680                 rdmac_mode |= (1 << 27);
6681
6682         /* Receive/send statistics. */
6683         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6684                 val = tr32(RCVLPC_STATS_ENABLE);
6685                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6686                 tw32(RCVLPC_STATS_ENABLE, val);
6687         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6688                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6689                 val = tr32(RCVLPC_STATS_ENABLE);
6690                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6691                 tw32(RCVLPC_STATS_ENABLE, val);
6692         } else {
6693                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6694         }
6695         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6696         tw32(SNDDATAI_STATSENAB, 0xffffff);
6697         tw32(SNDDATAI_STATSCTRL,
6698              (SNDDATAI_SCTRL_ENABLE |
6699               SNDDATAI_SCTRL_FASTUPD));
6700
6701         /* Setup host coalescing engine. */
6702         tw32(HOSTCC_MODE, 0);
6703         for (i = 0; i < 2000; i++) {
6704                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6705                         break;
6706                 udelay(10);
6707         }
6708
6709         __tg3_set_coalesce(tp, &tp->coal);
6710
6711         /* set status block DMA address */
6712         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6713              ((u64) tp->status_mapping >> 32));
6714         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6715              ((u64) tp->status_mapping & 0xffffffff));
6716
6717         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6718                 /* Status/statistics block address.  See tg3_timer,
6719                  * the tg3_periodic_fetch_stats call there, and
6720                  * tg3_get_stats to see how this works for 5705/5750 chips.
6721                  */
6722                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6723                      ((u64) tp->stats_mapping >> 32));
6724                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6725                      ((u64) tp->stats_mapping & 0xffffffff));
6726                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6727                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6728         }
6729
6730         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6731
6732         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6733         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6734         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6735                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6736
6737         /* Clear statistics/status block in chip, and status block in ram. */
6738         for (i = NIC_SRAM_STATS_BLK;
6739              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6740              i += sizeof(u32)) {
6741                 tg3_write_mem(tp, i, 0);
6742                 udelay(40);
6743         }
6744         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6745
6746         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6747                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6748                 /* reset to prevent losing 1st rx packet intermittently */
6749                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6750                 udelay(10);
6751         }
6752
6753         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6754                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6755         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6756             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6757             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6758                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6759         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6760         udelay(40);
6761
6762         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6763          * If TG3_FLG2_IS_NIC is zero, we should read the
6764          * register to preserve the GPIO settings for LOMs. The GPIOs,
6765          * whether used as inputs or outputs, are set by boot code after
6766          * reset.
6767          */
6768         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6769                 u32 gpio_mask;
6770
6771                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6772                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6773                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6774
6775                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6776                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6777                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6778
6779                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6780                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6781
6782                 tp->grc_local_ctrl &= ~gpio_mask;
6783                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6784
6785                 /* GPIO1 must be driven high for eeprom write protect */
6786                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6787                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6788                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6789         }
6790         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6791         udelay(100);
6792
6793         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6794         tp->last_tag = 0;
6795
6796         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6797                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6798                 udelay(40);
6799         }
6800
6801         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6802                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6803                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6804                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6805                WDMAC_MODE_LNGREAD_ENAB);
6806
6807         /* If statement applies to 5705 and 5750 PCI devices only */
6808         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6809              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6810             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6811                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6812                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6813                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6814                         /* nothing */
6815                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6816                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6817                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6818                         val |= WDMAC_MODE_RX_ACCEL;
6819                 }
6820         }
6821
6822         /* Enable host coalescing bug fix */
6823         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6824             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6825             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6826             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6827                 val |= (1 << 29);
6828
6829         tw32_f(WDMAC_MODE, val);
6830         udelay(40);
6831
6832         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6833                 u16 pcix_cmd;
6834
6835                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6836                                      &pcix_cmd);
6837                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6838                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6839                         pcix_cmd |= PCI_X_CMD_READ_2K;
6840                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6841                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6842                         pcix_cmd |= PCI_X_CMD_READ_2K;
6843                 }
6844                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6845                                       pcix_cmd);
6846         }
6847
6848         tw32_f(RDMAC_MODE, rdmac_mode);
6849         udelay(40);
6850
6851         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6852         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6853                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6854
6855         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6856                 tw32(SNDDATAC_MODE,
6857                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6858         else
6859                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6860
6861         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6862         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6863         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6864         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6865         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6866                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6867         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6868         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6869
6870         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6871                 err = tg3_load_5701_a0_firmware_fix(tp);
6872                 if (err)
6873                         return err;
6874         }
6875
6876         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6877                 err = tg3_load_tso_firmware(tp);
6878                 if (err)
6879                         return err;
6880         }
6881
6882         tp->tx_mode = TX_MODE_ENABLE;
6883         tw32_f(MAC_TX_MODE, tp->tx_mode);
6884         udelay(100);
6885
6886         tp->rx_mode = RX_MODE_ENABLE;
6887         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6889                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6890
6891         tw32_f(MAC_RX_MODE, tp->rx_mode);
6892         udelay(10);
6893
6894         if (tp->link_config.phy_is_low_power) {
6895                 tp->link_config.phy_is_low_power = 0;
6896                 tp->link_config.speed = tp->link_config.orig_speed;
6897                 tp->link_config.duplex = tp->link_config.orig_duplex;
6898                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6899         }
6900
6901         tp->mi_mode = MAC_MI_MODE_BASE;
6902         tw32_f(MAC_MI_MODE, tp->mi_mode);
6903         udelay(80);
6904
6905         tw32(MAC_LED_CTRL, tp->led_ctrl);
6906
6907         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6908         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6909                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6910                 udelay(10);
6911         }
6912         tw32_f(MAC_RX_MODE, tp->rx_mode);
6913         udelay(10);
6914
6915         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6916                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6917                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6918                         /* Set drive transmission level to 1.2V  */
6919                         /* only if the signal pre-emphasis bit is not set  */
6920                         val = tr32(MAC_SERDES_CFG);
6921                         val &= 0xfffff000;
6922                         val |= 0x880;
6923                         tw32(MAC_SERDES_CFG, val);
6924                 }
6925                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6926                         tw32(MAC_SERDES_CFG, 0x616000);
6927         }
6928
6929         /* Prevent chip from dropping frames when flow control
6930          * is enabled.
6931          */
6932         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6933
6934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6935             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6936                 /* Use hardware link auto-negotiation */
6937                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6938         }
6939
6940         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6941             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6942                 u32 tmp;
6943
6944                 tmp = tr32(SERDES_RX_CTRL);
6945                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6946                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6947                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6948                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6949         }
6950
6951         err = tg3_setup_phy(tp, 0);
6952         if (err)
6953                 return err;
6954
6955         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6956             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6957                 u32 tmp;
6958
6959                 /* Clear CRC stats. */
6960                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6961                         tg3_writephy(tp, MII_TG3_TEST1,
6962                                      tmp | MII_TG3_TEST1_CRC_EN);
6963                         tg3_readphy(tp, 0x14, &tmp);
6964                 }
6965         }
6966
6967         __tg3_set_rx_mode(tp->dev);
6968
6969         /* Initialize receive rules. */
6970         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6971         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6972         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6973         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6974
6975         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6976             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6977                 limit = 8;
6978         else
6979                 limit = 16;
6980         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6981                 limit -= 4;
6982         switch (limit) {
6983         case 16:
6984                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6985         case 15:
6986                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6987         case 14:
6988                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6989         case 13:
6990                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6991         case 12:
6992                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6993         case 11:
6994                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6995         case 10:
6996                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6997         case 9:
6998                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6999         case 8:
7000                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7001         case 7:
7002                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7003         case 6:
7004                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7005         case 5:
7006                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7007         case 4:
7008                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7009         case 3:
7010                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7011         case 2:
7012         case 1:
7013
7014         default:
7015                 break;
7016         };
7017
7018         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7019                 /* Write our heartbeat update interval to APE. */
7020                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7021                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7022
7023         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7024
7025         return 0;
7026 }
7027
7028 /* Called at device open time to get the chip ready for
7029  * packet processing.  Invoked with tp->lock held.
7030  */
7031 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7032 {
7033         int err;
7034
7035         /* Force the chip into D0. */
7036         err = tg3_set_power_state(tp, PCI_D0);
7037         if (err)
7038                 goto out;
7039
7040         tg3_switch_clocks(tp);
7041
7042         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7043
7044         err = tg3_reset_hw(tp, reset_phy);
7045
7046 out:
7047         return err;
7048 }
7049
/* Read the 32-bit hardware counter at REG and accumulate it into the
 * 64-bit statistic PSTAT, which is kept as separate ->low / ->high
 * words.  The hardware counters wrap at 32 bits; the unsigned-wrap
 * test ((PSTAT)->low < __val after the add) detects carry out of the
 * low word and propagates it into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7056
/* Fold the chip's 32-bit MAC and receive-list-placement hardware
 * counters into the 64-bit totals in tp->hw_stats.  TG3_STAT_ADD32
 * handles the 32-bit wrap of each on-chip counter.  Called from
 * tg3_timer (see the HOSTCC stats-block comment in tg3_reset_hw for
 * why some chips need this software accumulation).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Don't bother reading the counters while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement (RCVLPC) statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7097
/* Periodic housekeeping timer (fires every tp->timer_offset jiffies).
 * With non-tagged status it also massages the race-prone IRQ handshake
 * and schedules a full reset if the write DMA engine has stopped.
 * Once per second it fetches hardware statistics (5705+) and polls
 * link state; every two seconds it sends the ASF firmware heartbeat.
 * Always re-arms itself at the end.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While interrupts are being synchronized, skip this tick
	 * entirely and just re-arm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block has an unserviced update: poke the
			 * chip to raise the interrupt again.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force a fresh status block DMA now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine disabled itself: the chip is wedged,
		 * hand off to the reset worker (timer re-armed there via
		 * TG3_FLG2_RESTART_TIMER).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Link changes reported through MAC_STATUS rather
			 * than the status block.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but link state changed, or carrier
			 * down but PCS sync/signal present: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode off/on
					 * before the PHY setup.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the RX CPU event doorbell so the firmware
			 * notices the command (bit 14 — NOTE(review):
			 * meaning defined by the firmware interface).
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7217
7218 static int tg3_request_irq(struct tg3 *tp)
7219 {
7220         irq_handler_t fn;
7221         unsigned long flags;
7222         struct net_device *dev = tp->dev;
7223
7224         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7225                 fn = tg3_msi;
7226                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7227                         fn = tg3_msi_1shot;
7228                 flags = IRQF_SAMPLE_RANDOM;
7229         } else {
7230                 fn = tg3_interrupt;
7231                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7232                         fn = tg3_interrupt_tagged;
7233                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7234         }
7235         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7236 }
7237
/* Verify that the chip can actually deliver an interrupt to the host.
 * Temporarily swaps in the tg3_test_isr handler, forces an interrupt
 * via the host coalescing engine, then polls up to ~50ms for evidence
 * that it fired (non-zero interrupt mailbox, or the PCI interrupt
 * mask bit set — presumably by the test ISR).  The normal handler is
 * reinstalled before returning.  Returns 0 if an interrupt was seen,
 * -EIO if not, -ENODEV if the device is down, or a request_irq()
 * error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal handler regardless of the outcome. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7291
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error code means the device is
 * left without a working interrupt handler.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Reinstall the handler, now in INTx mode. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7352
/* net_device open() entry point.  Powers the chip to D0, allocates the
 * DMA-consistent rings and status/stats blocks, enables MSI when the
 * chip supports it (validated afterwards with tg3_test_msi()),
 * programs the hardware, starts the periodic timer and enables
 * interrupts.  Each failure path unwinds exactly what was set up
 * before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* 1 Hz tick with tagged status, 10 Hz otherwise (the
		 * non-tagged IRQ workaround in tg3_timer() needs the
		 * faster rate).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_multiplier ticks = 1 second; asf_multiplier
		 * ticks = 2 seconds (ASF heartbeat period).
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* tg3_test_msi() may fall back to INTx on its own
		 * (clearing TG3_FLG2_USING_MSI); a non-zero return
		 * means even that recovery failed.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7488
#if 0
/* Debug-only (compiled out): dump PCI config state, the major MAC/DMA
 * block mode/status registers, the BDINFO and SRAM ring control
 * blocks, the host status/statistics blocks, the producer mailboxes,
 * and the first few NIC-side TX/RX descriptors to the console.
 * Called (when enabled) from tg3_close() under tg3_full_lock().
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* SRAM ring control blocks (read through the memory window). */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7716
7717 static struct net_device_stats *tg3_get_stats(struct net_device *);
7718 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7719
/* net_device stop() entry point — the reverse of tg3_open().  Stops
 * NAPI, the reset worker and the periodic timer, halts the chip,
 * releases the IRQ (and MSI vector), snapshots the cumulative
 * statistics, then frees the DMA rings and drops the device into
 * D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure a queued reset_task cannot run against a
	 * half-torn-down device.
	 */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the cumulative counters before tg3_free_consistent()
	 * frees hw_stats; tg3_get_stats()/tg3_get_estats() fall back to
	 * these snapshots while hw_stats is NULL.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7763
7764 static inline unsigned long get_stat64(tg3_stat64_t *val)
7765 {
7766         unsigned long ret;
7767
7768 #if (BITS_PER_LONG == 32)
7769         ret = val->low;
7770 #else
7771         ret = ((u64)val->high << 32) | ((u64)val->low);
7772 #endif
7773         return ret;
7774 }
7775
/* Return the cumulative receive CRC error count.  On 5700/5701
 * copper (non-SerDes) devices the count is obtained from the PHY
 * (via MII_TG3_TEST1 / register 0x14) and accumulated in software
 * in tp->phy_crc_errors; all other devices report it from the
 * hardware statistics block (rx_fcs_errors).
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it from
			 * register 0x14.  NOTE(review): the accumulation
			 * below assumes 0x14 is clear-on-read — confirm
			 * against the PHY datasheet.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;	/* PHY read failed; count nothing */
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7801
/* Cumulative ethtool stat for one member: the snapshot saved at the
 * last close (estats_prev) plus the live hardware counter.  Expects
 * locals named estats, old_estats and hw_stats in the calling scope.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
7805
/* Build the cumulative ethtool statistics in tp->estats by adding the
 * pre-close snapshot (tp->estats_prev) to the live hardware counters.
 * Returns the snapshot unchanged when the hardware stats block is not
 * mapped (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7893
7894 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7895 {
7896         struct tg3 *tp = netdev_priv(dev);
7897         struct net_device_stats *stats = &tp->net_stats;
7898         struct net_device_stats *old_stats = &tp->net_stats_prev;
7899         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7900
7901         if (!hw_stats)
7902                 return old_stats;
7903
7904         stats->rx_packets = old_stats->rx_packets +
7905                 get_stat64(&hw_stats->rx_ucast_packets) +
7906                 get_stat64(&hw_stats->rx_mcast_packets) +
7907                 get_stat64(&hw_stats->rx_bcast_packets);
7908
7909         stats->tx_packets = old_stats->tx_packets +
7910                 get_stat64(&hw_stats->tx_ucast_packets) +
7911                 get_stat64(&hw_stats->tx_mcast_packets) +
7912                 get_stat64(&hw_stats->tx_bcast_packets);
7913
7914         stats->rx_bytes = old_stats->rx_bytes +
7915                 get_stat64(&hw_stats->rx_octets);
7916         stats->tx_bytes = old_stats->tx_bytes +
7917                 get_stat64(&hw_stats->tx_octets);
7918
7919         stats->rx_errors = old_stats->rx_errors +
7920                 get_stat64(&hw_stats->rx_errors);
7921         stats->tx_errors = old_stats->tx_errors +
7922                 get_stat64(&hw_stats->tx_errors) +
7923                 get_stat64(&hw_stats->tx_mac_errors) +
7924                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7925                 get_stat64(&hw_stats->tx_discards);
7926
7927         stats->multicast = old_stats->multicast +
7928                 get_stat64(&hw_stats->rx_mcast_packets);
7929         stats->collisions = old_stats->collisions +
7930                 get_stat64(&hw_stats->tx_collisions);
7931
7932         stats->rx_length_errors = old_stats->rx_length_errors +
7933                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7934                 get_stat64(&hw_stats->rx_undersize_packets);
7935
7936         stats->rx_over_errors = old_stats->rx_over_errors +
7937                 get_stat64(&hw_stats->rxbds_empty);
7938         stats->rx_frame_errors = old_stats->rx_frame_errors +
7939                 get_stat64(&hw_stats->rx_align_errors);
7940         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7941                 get_stat64(&hw_stats->tx_discards);
7942         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7943                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7944
7945         stats->rx_crc_errors = old_stats->rx_crc_errors +
7946                 calc_crc_errors(tp);
7947
7948         stats->rx_missed_errors = old_stats->rx_missed_errors +
7949                 get_stat64(&hw_stats->rx_discards);
7950
7951         return stats;
7952 }
7953
/* Compute the standard (IEEE 802.3) CRC-32 of the buffer: init
 * 0xffffffff, reflected polynomial 0xedb88320, final complement.
 * Used to derive multicast hash filter bits.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		/* Process the byte one bit at a time, LSB first. */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return ~crc;
}
7978
7979 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7980 {
7981         /* accept or reject all multicast frames */
7982         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7983         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7984         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7985         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7986 }
7987
7988 static void __tg3_set_rx_mode(struct net_device *dev)
7989 {
7990         struct tg3 *tp = netdev_priv(dev);
7991         u32 rx_mode;
7992
7993         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7994                                   RX_MODE_KEEP_VLAN_TAG);
7995
7996         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7997          * flag clear.
7998          */
7999 #if TG3_VLAN_TAG_USED
8000         if (!tp->vlgrp &&
8001             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8002                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8003 #else
8004         /* By definition, VLAN is disabled always in this
8005          * case.
8006          */
8007         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8008                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8009 #endif
8010
8011         if (dev->flags & IFF_PROMISC) {
8012                 /* Promiscuous mode. */
8013                 rx_mode |= RX_MODE_PROMISC;
8014         } else if (dev->flags & IFF_ALLMULTI) {
8015                 /* Accept all multicast. */
8016                 tg3_set_multi (tp, 1);
8017         } else if (dev->mc_count < 1) {
8018                 /* Reject all multicast. */
8019                 tg3_set_multi (tp, 0);
8020         } else {
8021                 /* Accept one or more multicast(s). */
8022                 struct dev_mc_list *mclist;
8023                 unsigned int i;
8024                 u32 mc_filter[4] = { 0, };
8025                 u32 regidx;
8026                 u32 bit;
8027                 u32 crc;
8028
8029                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8030                      i++, mclist = mclist->next) {
8031
8032                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8033                         bit = ~crc & 0x7f;
8034                         regidx = (bit & 0x60) >> 5;
8035                         bit &= 0x1f;
8036                         mc_filter[regidx] |= (1 << bit);
8037                 }
8038
8039                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8040                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8041                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8042                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8043         }
8044
8045         if (rx_mode != tp->rx_mode) {
8046                 tp->rx_mode = rx_mode;
8047                 tw32_f(MAC_RX_MODE, rx_mode);
8048                 udelay(10);
8049         }
8050 }
8051
/* netdev set_rx_mode hook: apply the RX mode under the full lock,
 * but only while the interface is up (the chip is reprogrammed on
 * open anyway).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8063
8064 #define TG3_REGDUMP_LEN         (32 * 1024)
8065
/* ethtool get_regs_len: the register dump is a fixed 32KB window. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8070
/* ethtool get_regs: dump device registers into the 32KB buffer at _p.
 * Registers are read in named groups; GET_REG32_LOOP re-seats the
 * cursor 'p' at orig_p + base so each value lands at its natural
 * register offset, leaving unread ranges zeroed.  The dump is skipped
 * (buffer stays zeroed) while the PHY is in low-power state, since
 * register reads would not be meaningful then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	/* Zero the whole buffer so gaps between groups are well-defined. */
	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump at the current cursor position. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Read 'len' bytes of registers starting at 'base', placing each value
 * at its register offset within the dump buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register at its natural offset in the dump. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only readable when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8143
8144 static int tg3_get_eeprom_len(struct net_device *dev)
8145 {
8146         struct tg3 *tp = netdev_priv(dev);
8147
8148         return tp->nvram_size;
8149 }
8150
8151 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8152 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8153
/* ethtool get_eeprom: copy 'eeprom->len' bytes of NVRAM starting at
 * 'eeprom->offset' into 'data'.  NVRAM is only word-addressable, so
 * unaligned head and tail bytes are handled by reading the covering
 * 32-bit word and memcpy'ing the wanted slice.  eeprom->len is updated
 * to the number of bytes actually copied, even on error.
 * Returns 0 or a negative errno from the NVRAM read.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are big-endian; present bytes in LE order. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied so far before failing. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8215
8216 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8217
/* ethtool set_eeprom: write 'eeprom->len' bytes of 'data' to NVRAM at
 * 'eeprom->offset'.  Since NVRAM writes are word-granular, an
 * unaligned start or end is widened to a 4-byte boundary by first
 * reading the existing boundary words and splicing the user data into
 * a temporary buffer (read-modify-write).
 * Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Caller must echo back the magic from get_eeprom. */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		/* Writing fewer than 4 bytes still rewrites a full word. */
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build a word-aligned image: boundary words from NVRAM
		 * with the caller's bytes spliced in between.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8276
8277 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8278 {
8279         struct tg3 *tp = netdev_priv(dev);
8280
8281         cmd->supported = (SUPPORTED_Autoneg);
8282
8283         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8284                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8285                                    SUPPORTED_1000baseT_Full);
8286
8287         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8288                 cmd->supported |= (SUPPORTED_100baseT_Half |
8289                                   SUPPORTED_100baseT_Full |
8290                                   SUPPORTED_10baseT_Half |
8291                                   SUPPORTED_10baseT_Full |
8292                                   SUPPORTED_MII);
8293                 cmd->port = PORT_TP;
8294         } else {
8295                 cmd->supported |= SUPPORTED_FIBRE;
8296                 cmd->port = PORT_FIBRE;
8297         }
8298
8299         cmd->advertising = tp->link_config.advertising;
8300         if (netif_running(dev)) {
8301                 cmd->speed = tp->link_config.active_speed;
8302                 cmd->duplex = tp->link_config.active_duplex;
8303         }
8304         cmd->phy_address = PHY_ADDR;
8305         cmd->transceiver = 0;
8306         cmd->autoneg = tp->link_config.autoneg;
8307         cmd->maxtxpkt = 0;
8308         cmd->maxrxpkt = 0;
8309         return 0;
8310 }
8311
8312 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8313 {
8314         struct tg3 *tp = netdev_priv(dev);
8315
8316         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8317                 /* These are the only valid advertisement bits allowed.  */
8318                 if (cmd->autoneg == AUTONEG_ENABLE &&
8319                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8320                                           ADVERTISED_1000baseT_Full |
8321                                           ADVERTISED_Autoneg |
8322                                           ADVERTISED_FIBRE)))
8323                         return -EINVAL;
8324                 /* Fiber can only do SPEED_1000.  */
8325                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8326                          (cmd->speed != SPEED_1000))
8327                         return -EINVAL;
8328         /* Copper cannot force SPEED_1000.  */
8329         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8330                    (cmd->speed == SPEED_1000))
8331                 return -EINVAL;
8332         else if ((cmd->speed == SPEED_1000) &&
8333                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8334                 return -EINVAL;
8335
8336         tg3_full_lock(tp, 0);
8337
8338         tp->link_config.autoneg = cmd->autoneg;
8339         if (cmd->autoneg == AUTONEG_ENABLE) {
8340                 tp->link_config.advertising = (cmd->advertising |
8341                                               ADVERTISED_Autoneg);
8342                 tp->link_config.speed = SPEED_INVALID;
8343                 tp->link_config.duplex = DUPLEX_INVALID;
8344         } else {
8345                 tp->link_config.advertising = 0;
8346                 tp->link_config.speed = cmd->speed;
8347                 tp->link_config.duplex = cmd->duplex;
8348         }
8349
8350         tp->link_config.orig_speed = tp->link_config.speed;
8351         tp->link_config.orig_duplex = tp->link_config.duplex;
8352         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8353
8354         if (netif_running(dev))
8355                 tg3_setup_phy(tp, 1);
8356
8357         tg3_full_unlock(tp);
8358
8359         return 0;
8360 }
8361
/* ethtool get_drvinfo: report driver name/version, firmware version
 * string gathered at probe time, and the PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	/* NOTE(review): assumes tp->fw_ver fits in info->fw_version —
	 * both look bounded by their declarations; confirm sizes.
	 */
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
8371
8372 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8373 {
8374         struct tg3 *tp = netdev_priv(dev);
8375
8376         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8377                 wol->supported = WAKE_MAGIC;
8378         else
8379                 wol->supported = 0;
8380         wol->wolopts = 0;
8381         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8382                 wol->wolopts = WAKE_MAGIC;
8383         memset(&wol->sopass, 0, sizeof(wol->sopass));
8384 }
8385
8386 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8387 {
8388         struct tg3 *tp = netdev_priv(dev);
8389
8390         if (wol->wolopts & ~WAKE_MAGIC)
8391                 return -EINVAL;
8392         if ((wol->wolopts & WAKE_MAGIC) &&
8393             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8394                 return -EINVAL;
8395
8396         spin_lock_bh(&tp->lock);
8397         if (wol->wolopts & WAKE_MAGIC)
8398                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8399         else
8400                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8401         spin_unlock_bh(&tp->lock);
8402
8403         return 0;
8404 }
8405
8406 static u32 tg3_get_msglevel(struct net_device *dev)
8407 {
8408         struct tg3 *tp = netdev_priv(dev);
8409         return tp->msg_enable;
8410 }
8411
8412 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8413 {
8414         struct tg3 *tp = netdev_priv(dev);
8415         tp->msg_enable = value;
8416 }
8417
8418 static int tg3_set_tso(struct net_device *dev, u32 value)
8419 {
8420         struct tg3 *tp = netdev_priv(dev);
8421
8422         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8423                 if (value)
8424                         return -EINVAL;
8425                 return 0;
8426         }
8427         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8428             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8429                 if (value) {
8430                         dev->features |= NETIF_F_TSO6;
8431                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8432                                 dev->features |= NETIF_F_TSO_ECN;
8433                 } else
8434                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8435         }
8436         return ethtool_op_set_tso(dev, value);
8437 }
8438
/* ethtool nway_reset: restart copper autonegotiation.  Fails with
 * -EAGAIN if the device is down, -EINVAL on serdes PHYs or when
 * autoneg is not enabled (unless parallel detect is in progress).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is read twice: the first read is discarded.
	 * NOTE(review): looks like a deliberate dummy read for latched
	 * PHY state — confirm before ever collapsing to one read.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a new autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8465
8466 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8467 {
8468         struct tg3 *tp = netdev_priv(dev);
8469
8470         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8471         ering->rx_mini_max_pending = 0;
8472         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8473                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8474         else
8475                 ering->rx_jumbo_max_pending = 0;
8476
8477         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8478
8479         ering->rx_pending = tp->rx_pending;
8480         ering->rx_mini_pending = 0;
8481         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8482                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8483         else
8484                 ering->rx_jumbo_pending = 0;
8485
8486         ering->tx_pending = tp->tx_pending;
8487 }
8488
/* ethtool set_ringparam: validate and apply new ring sizes, halting
 * and restarting the chip if the interface is running.  The TX ring
 * must be large enough for a maximally fragmented skb (3x on chips
 * with the TSO bug, which linearize large TSO frames).
 * Returns 0, -EINVAL on bad sizes, or the restart error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce NAPI/interrupts before reconfiguring the rings. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Ring sizes only take effect across a chip reset. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8528
8529 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8530 {
8531         struct tg3 *tp = netdev_priv(dev);
8532
8533         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8534         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8535         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8536 }
8537
8538 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8539 {
8540         struct tg3 *tp = netdev_priv(dev);
8541         int irq_sync = 0, err = 0;
8542
8543         if (netif_running(dev)) {
8544                 tg3_netif_stop(tp);
8545                 irq_sync = 1;
8546         }
8547
8548         tg3_full_lock(tp, irq_sync);
8549
8550         if (epause->autoneg)
8551                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8552         else
8553                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8554         if (epause->rx_pause)
8555                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8556         else
8557                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8558         if (epause->tx_pause)
8559                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8560         else
8561                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8562
8563         if (netif_running(dev)) {
8564                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8565                 err = tg3_restart_hw(tp, 1);
8566                 if (!err)
8567                         tg3_netif_start(tp);
8568         }
8569
8570         tg3_full_unlock(tp);
8571
8572         return err;
8573 }
8574
8575 static u32 tg3_get_rx_csum(struct net_device *dev)
8576 {
8577         struct tg3 *tp = netdev_priv(dev);
8578         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8579 }
8580
8581 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8582 {
8583         struct tg3 *tp = netdev_priv(dev);
8584
8585         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8586                 if (data != 0)
8587                         return -EINVAL;
8588                 return 0;
8589         }
8590
8591         spin_lock_bh(&tp->lock);
8592         if (data)
8593                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8594         else
8595                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8596         spin_unlock_bh(&tp->lock);
8597
8598         return 0;
8599 }
8600
8601 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8602 {
8603         struct tg3 *tp = netdev_priv(dev);
8604
8605         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8606                 if (data != 0)
8607                         return -EINVAL;
8608                 return 0;
8609         }
8610
8611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8612             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8614             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8615                 ethtool_op_set_tx_ipv6_csum(dev, data);
8616         else
8617                 ethtool_op_set_tx_csum(dev, data);
8618
8619         return 0;
8620 }
8621
8622 static int tg3_get_sset_count (struct net_device *dev, int sset)
8623 {
8624         switch (sset) {
8625         case ETH_SS_TEST:
8626                 return TG3_NUM_TEST;
8627         case ETH_SS_STATS:
8628                 return TG3_NUM_STATS;
8629         default:
8630                 return -EOPNOTSUPP;
8631         }
8632 }
8633
8634 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8635 {
8636         switch (stringset) {
8637         case ETH_SS_STATS:
8638                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8639                 break;
8640         case ETH_SS_TEST:
8641                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8642                 break;
8643         default:
8644                 WARN_ON(1);     /* we need a WARN() */
8645                 break;
8646         }
8647 }
8648
8649 static int tg3_phys_id(struct net_device *dev, u32 data)
8650 {
8651         struct tg3 *tp = netdev_priv(dev);
8652         int i;
8653
8654         if (!netif_running(tp->dev))
8655                 return -EAGAIN;
8656
8657         if (data == 0)
8658                 data = 2;
8659
8660         for (i = 0; i < (data * 2); i++) {
8661                 if ((i % 2) == 0)
8662                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8663                                            LED_CTRL_1000MBPS_ON |
8664                                            LED_CTRL_100MBPS_ON |
8665                                            LED_CTRL_10MBPS_ON |
8666                                            LED_CTRL_TRAFFIC_OVERRIDE |
8667                                            LED_CTRL_TRAFFIC_BLINK |
8668                                            LED_CTRL_TRAFFIC_LED);
8669
8670                 else
8671                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8672                                            LED_CTRL_TRAFFIC_OVERRIDE);
8673
8674                 if (msleep_interruptible(500))
8675                         break;
8676         }
8677         tw32(MAC_LED_CTRL, tp->led_ctrl);
8678         return 0;
8679 }
8680
/* ethtool get_ethtool_stats: refresh the extended stats via
 * tg3_get_estats() and copy the whole block into the caller's array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8687
#define NVRAM_TEST_SIZE 0x100			/* bytes of a legacy-format image to verify */
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14	/* size of a selfboot format-1 image */
#define NVRAM_SELFBOOT_HW_SIZE 0x20		/* size of a hw selfboot image */
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c		/* data bytes inside a hw selfboot image */

/* ethtool offline self-test: validate the NVRAM image.
 *
 * Reads word 0 to identify the image format, then checks:
 *   - legacy images: CRC at offset 0x10 and manufacturing-block CRC at 0xfc,
 *   - selfboot format-1 images: 8-bit sum of the whole image must be zero,
 *   - hw selfboot images: per-byte odd parity.
 * Returns 0 on success, -EIO on read/verify failure, -ENOMEM on allocation
 * failure.  Unrecognized selfboot revisions are skipped (return 0).
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	/* Word 0 holds the format magic. */
	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		/* Only selfboot format-1 revision 2 (0x2xxxxx) is testable. */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the entire image into memory, stored as little-endian words. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format 1: the 8-bit sum over the image must be zero. */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Hw selfboot: bytes 0, 8 and 16 pack the parity bits for the
	 * surrounding data bytes; split them out, then verify odd parity.
	 */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and byte 17 carries 8 more. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Odd parity: fail when the data already has odd
			 * weight and the parity bit is set, or even weight
			 * and the parity bit is clear.
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy format.  Bootstrap checksum at offset 0x10.
	 * NOTE(review): buf[] holds le32 words while calc_crc() runs over the
	 * raw bytes — relies on the image round-tripping byte-for-byte;
	 * confirm on big-endian.
	 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8805
8806 #define TG3_SERDES_TIMEOUT_SEC  2
8807 #define TG3_COPPER_TIMEOUT_SEC  6
8808
8809 static int tg3_test_link(struct tg3 *tp)
8810 {
8811         int i, max;
8812
8813         if (!netif_running(tp->dev))
8814                 return -ENODEV;
8815
8816         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8817                 max = TG3_SERDES_TIMEOUT_SEC;
8818         else
8819                 max = TG3_COPPER_TIMEOUT_SEC;
8820
8821         for (i = 0; i < max; i++) {
8822                 if (netif_carrier_ok(tp->dev))
8823                         return 0;
8824
8825                 if (msleep_interruptible(1000))
8826                         break;
8827         }
8828
8829         return -EIO;
8830 }
8831
/* Only test the commonly used registers */
/* ethtool offline self-test: register test.
 *
 * For each table entry applicable to this chip, the register is saved,
 * written with zero and then with all defined (read-only | read/write)
 * bits, verifying after each write that the read-only bits kept their
 * saved value and the read/write bits took the written value.  The
 * original contents are restored on every exit path.
 * Returns 0 on success, -EIO (with the failing offset logged) on mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1	/* entry applies only on 5705+ chips */
#define TG3_FL_NOT_5705	0x2	/* entry skipped on 5705+ chips */
#define TG3_FL_NOT_5788	0x4	/* entry skipped on 5788 */
#define TG3_FL_NOT_5750	0x8	/* entry skipped on 5750+ chips */
		u32 read_mask;	/* bits that must hold their saved value */
		u32 write_mask;	/* bits that must accept written values */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip variant. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register's original contents. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	/* Restore the register before reporting the failure. */
	tw32(offset, save_val);
	return -EIO;
}
9052
9053 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9054 {
9055         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9056         int i;
9057         u32 j;
9058
9059         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9060                 for (j = 0; j < len; j += 4) {
9061                         u32 val;
9062
9063                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9064                         tg3_read_mem(tp, offset + j, &val);
9065                         if (val != test_pattern[i])
9066                                 return -EIO;
9067                 }
9068         }
9069         return 0;
9070 }
9071
9072 static int tg3_test_memory(struct tg3 *tp)
9073 {
9074         static struct mem_entry {
9075                 u32 offset;
9076                 u32 len;
9077         } mem_tbl_570x[] = {
9078                 { 0x00000000, 0x00b50},
9079                 { 0x00002000, 0x1c000},
9080                 { 0xffffffff, 0x00000}
9081         }, mem_tbl_5705[] = {
9082                 { 0x00000100, 0x0000c},
9083                 { 0x00000200, 0x00008},
9084                 { 0x00004000, 0x00800},
9085                 { 0x00006000, 0x01000},
9086                 { 0x00008000, 0x02000},
9087                 { 0x00010000, 0x0e000},
9088                 { 0xffffffff, 0x00000}
9089         }, mem_tbl_5755[] = {
9090                 { 0x00000200, 0x00008},
9091                 { 0x00004000, 0x00800},
9092                 { 0x00006000, 0x00800},
9093                 { 0x00008000, 0x02000},
9094                 { 0x00010000, 0x0c000},
9095                 { 0xffffffff, 0x00000}
9096         }, mem_tbl_5906[] = {
9097                 { 0x00000200, 0x00008},
9098                 { 0x00004000, 0x00400},
9099                 { 0x00006000, 0x00400},
9100                 { 0x00008000, 0x01000},
9101                 { 0x00010000, 0x01000},
9102                 { 0xffffffff, 0x00000}
9103         };
9104         struct mem_entry *mem_tbl;
9105         int err = 0;
9106         int i;
9107
9108         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9109                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9110                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9111                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9112                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9113                         mem_tbl = mem_tbl_5755;
9114                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9115                         mem_tbl = mem_tbl_5906;
9116                 else
9117                         mem_tbl = mem_tbl_5705;
9118         } else
9119                 mem_tbl = mem_tbl_570x;
9120
9121         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9122                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9123                     mem_tbl[i].len)) != 0)
9124                         break;
9125         }
9126
9127         return err;
9128 }
9129
#define TG3_MAC_LOOPBACK	0	/* loop frames inside the MAC */
#define TG3_PHY_LOOPBACK	1	/* loop frames inside the PHY */

/* Run one loopback iteration: put the MAC or PHY into loopback, transmit a
 * single self-addressed 1514-byte test frame, then verify that exactly that
 * frame arrives back on the standard RX ring with an intact payload.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK.
 * Returns 0 if the frame was echoed correctly, -EINVAL for an unknown mode,
 * -ENOMEM if the skb allocation fails, -EIO on any send/receive mismatch.
 * The received skb is left on the ring; tg3_free_rings() reclaims it.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* Via the shadow register at 0x1b, disable the
			 * 5906 PHY feature that interferes with loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our own MAC as destination, zero
	 * source/type bytes, then an incrementing byte pattern.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the status block indices are current. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame and kick the TX mailbox. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been consumed ... */
	if (tx_idx != tp->tx_prod)
		goto out;

	/* ... and exactly one frame received. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Length excludes the 4-byte FCS appended by the MAC. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the incrementing payload pattern byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9300
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
					 TG3_PHY_LOOPBACK_FAILED)

/* ethtool offline self-test: loopback test.
 * Resets the hardware, then runs the MAC loopback test and (for non-SERDES
 * PHYs) the PHY loopback test.  Returns a bitmask of TG3_*_LOOPBACK_FAILED
 * bits; 0 means both passed.  On CPMU-equipped chips the CPMU hardware
 * mutex is held and link-speed power management disabled around the MAC
 * loopback run, since the CPMU can otherwise change clocks mid-test.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore CPMU control and release the hardware mutex. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is meaningless on SERDES links. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9359
/* ethtool ->self_test entry point.
 *
 * Result slots in data[]: [0] NVRAM, [1] link, [2] registers, [3] memory,
 * [4] loopback bitmask, [5] interrupt.  A non-zero slot marks a failure and
 * ETH_TEST_FL_FAILED is set in etest->flags.
 *
 * NVRAM and link tests are non-intrusive.  The offline tests halt the
 * chip (under the full lock, with NAPI stopped), run, then restart the
 * hardware if the interface was up.  A device in low-power state is
 * temporarily brought to D0 for the duration of the tests.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce NAPI/TX before taking the full lock. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its internal CPUs so the register and
		 * memory tests see quiescent hardware.  The NVRAM lock is
		 * held across the CPU halt to keep firmware out of NVRAM.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs interrupts, so drop the lock. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back up if the interface was running. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9432
9433 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9434 {
9435         struct mii_ioctl_data *data = if_mii(ifr);
9436         struct tg3 *tp = netdev_priv(dev);
9437         int err;
9438
9439         switch(cmd) {
9440         case SIOCGMIIPHY:
9441                 data->phy_id = PHY_ADDR;
9442
9443                 /* fallthru */
9444         case SIOCGMIIREG: {
9445                 u32 mii_regval;
9446
9447                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9448                         break;                  /* We have no PHY */
9449
9450                 if (tp->link_config.phy_is_low_power)
9451                         return -EAGAIN;
9452
9453                 spin_lock_bh(&tp->lock);
9454                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9455                 spin_unlock_bh(&tp->lock);
9456
9457                 data->val_out = mii_regval;
9458
9459                 return err;
9460         }
9461
9462         case SIOCSMIIREG:
9463                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9464                         break;                  /* We have no PHY */
9465
9466                 if (!capable(CAP_NET_ADMIN))
9467                         return -EPERM;
9468
9469                 if (tp->link_config.phy_is_low_power)
9470                         return -EAGAIN;
9471
9472                 spin_lock_bh(&tp->lock);
9473                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9474                 spin_unlock_bh(&tp->lock);
9475
9476                 return err;
9477
9478         default:
9479                 /* do nothing */
9480                 break;
9481         }
9482         return -EOPNOTSUPP;
9483 }
9484
9485 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: install the new VLAN group and reprogram the
 * RX mode.  The device is quiesced (NAPI/TX stopped, full lock held)
 * while the group pointer is swapped so the RX path never sees a stale
 * tp->vlgrp.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
9505 #endif
9506
9507 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9508 {
9509         struct tg3 *tp = netdev_priv(dev);
9510
9511         memcpy(ec, &tp->coal, sizeof(*ec));
9512         return 0;
9513 }
9514
9515 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9516 {
9517         struct tg3 *tp = netdev_priv(dev);
9518         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9519         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9520
9521         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9522                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9523                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9524                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9525                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9526         }
9527
9528         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9529             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9530             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9531             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9532             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9533             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9534             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9535             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9536             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9537             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9538                 return -EINVAL;
9539
9540         /* No rx interrupts will be generated if both are zero */
9541         if ((ec->rx_coalesce_usecs == 0) &&
9542             (ec->rx_max_coalesced_frames == 0))
9543                 return -EINVAL;
9544
9545         /* No tx interrupts will be generated if both are zero */
9546         if ((ec->tx_coalesce_usecs == 0) &&
9547             (ec->tx_max_coalesced_frames == 0))
9548                 return -EINVAL;
9549
9550         /* Only copy relevant parameters, ignore all others. */
9551         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9552         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9553         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9554         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9555         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9556         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9557         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9558         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9559         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9560
9561         if (netif_running(dev)) {
9562                 tg3_full_lock(tp, 0);
9563                 __tg3_set_coalesce(tp, &tp->coal);
9564                 tg3_full_unlock(tp);
9565         }
9566         return 0;
9567 }
9568
/* ethtool operations table for tg3 devices.  Callbacks not listed here
 * fall back to the ethtool core's defaults.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9601
9602 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9603 {
9604         u32 cursize, val, magic;
9605
9606         tp->nvram_size = EEPROM_CHIP_SIZE;
9607
9608         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9609                 return;
9610
9611         if ((magic != TG3_EEPROM_MAGIC) &&
9612             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9613             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9614                 return;
9615
9616         /*
9617          * Size the chip by reading offsets at increasing powers of two.
9618          * When we encounter our validation signature, we know the addressing
9619          * has wrapped around, and thus have our chip size.
9620          */
9621         cursize = 0x10;
9622
9623         while (cursize < tp->nvram_size) {
9624                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9625                         return;
9626
9627                 if (val == magic)
9628                         break;
9629
9630                 cursize <<= 1;
9631         }
9632
9633         tp->nvram_size = cursize;
9634 }
9635
9636 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9637 {
9638         u32 val;
9639
9640         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9641                 return;
9642
9643         /* Selfboot format */
9644         if (val != TG3_EEPROM_MAGIC) {
9645                 tg3_get_eeprom_size(tp);
9646                 return;
9647         }
9648
9649         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9650                 if (val != 0) {
9651                         tp->nvram_size = (val >> 16) * 1024;
9652                         return;
9653                 }
9654         }
9655         tp->nvram_size = 0x80000;
9656 }
9657
/* Decode NVRAM_CFG1 into jedecnum/pagesize/flags for pre-5752 parts.
 * Only 5750 and 5780-class chips use the vendor field; everything else
 * is assumed to be a buffered Atmel part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear the compatibility bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Default for chips whose vendor field is not decoded. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9710
/* Decode NVRAM_CFG1 for the 5752: vendor/device selection plus, for
 * flash parts, the page size field.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	/* For flash parts the page size is encoded in NVRAM_CFG1. */
	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear the compatibility bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9771
/* Decode NVRAM_CFG1 for the 5755.  Unlike the 5752, the NVRAM size is
 * derived directly from the detected device; protected parts report a
 * smaller usable size.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 : 0x80000);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x1f200 : 0x20000);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ? 0x10000 : 0x20000);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ? 0x10000 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x20000 : 0x80000);
			break;
	}
}
9818
/* Decode NVRAM_CFG1 for the 5787/5784 family.  Only jedecnum, flags
 * and pagesize are set here; the size is probed separately.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM path: clear the compatibility bypass bit. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9856
/* Decode NVRAM_CFG1 for the 5761.  Atmel parts on this chip need no
 * page-address translation (TG3_FLG3_NO_NVRAM_ADDR_TRANS).  For
 * protected parts the usable size comes from the NVRAM_ADDR_LOCKOUT
 * register; otherwise it is implied by the detected device.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		/* Protected: hardware reports the accessible size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = 0x100000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = 0x80000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = 0x40000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = 0x20000;
				break;
		}
	}
}
9931
9932 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9933 {
9934         tp->nvram_jedecnum = JEDEC_ATMEL;
9935         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9936         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9937 }
9938
9939 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9940 static void __devinit tg3_nvram_init(struct tg3 *tp)
9941 {
9942         tw32_f(GRC_EEPROM_ADDR,
9943              (EEPROM_ADDR_FSM_RESET |
9944               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9945                EEPROM_ADDR_CLKPERD_SHIFT)));
9946
9947         msleep(1);
9948
9949         /* Enable seeprom accesses. */
9950         tw32_f(GRC_LOCAL_CTRL,
9951              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9952         udelay(100);
9953
9954         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9955             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9956                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9957
9958                 if (tg3_nvram_lock(tp)) {
9959                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9960                                "tg3_nvram_init failed.\n", tp->dev->name);
9961                         return;
9962                 }
9963                 tg3_enable_nvram_access(tp);
9964
9965                 tp->nvram_size = 0;
9966
9967                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9968                         tg3_get_5752_nvram_info(tp);
9969                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9970                         tg3_get_5755_nvram_info(tp);
9971                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9972                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9973                         tg3_get_5787_nvram_info(tp);
9974                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9975                         tg3_get_5761_nvram_info(tp);
9976                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9977                         tg3_get_5906_nvram_info(tp);
9978                 else
9979                         tg3_get_nvram_info(tp);
9980
9981                 if (tp->nvram_size == 0)
9982                         tg3_get_nvram_size(tp);
9983
9984                 tg3_disable_nvram_access(tp);
9985                 tg3_nvram_unlock(tp);
9986
9987         } else {
9988                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9989
9990                 tg3_get_eeprom_size(tp);
9991         }
9992 }
9993
/* Read one 32-bit word from the legacy serial EEPROM via the
 * GRC_EEPROM_ADDR/DATA state machine.  Used when TG3_FLAG_NVRAM is not
 * set (see tg3_nvram_read()).  Returns 0 on success, -EINVAL for a bad
 * offset, -EBUSY on poll timeout; *val is written only on success.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	/* Offset must be word-aligned and within the addressable range. */
	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear the address, device-id and
	 * read-strobe fields before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10027
10028 #define NVRAM_CMD_TIMEOUT 10000
10029
10030 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10031 {
10032         int i;
10033
10034         tw32(NVRAM_CMD, nvram_cmd);
10035         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10036                 udelay(10);
10037                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10038                         udelay(10);
10039                         break;
10040                 }
10041         }
10042         if (i == NVRAM_CMD_TIMEOUT) {
10043                 return -EBUSY;
10044         }
10045         return 0;
10046 }
10047
10048 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10049 {
10050         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10051             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10052             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10053            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10054             (tp->nvram_jedecnum == JEDEC_ATMEL))
10055
10056                 addr = ((addr / tp->nvram_pagesize) <<
10057                         ATMEL_AT45DB0X1B_PAGE_POS) +
10058                        (addr % tp->nvram_pagesize);
10059
10060         return addr;
10061 }
10062
10063 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10064 {
10065         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10066             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10067             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10068            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10069             (tp->nvram_jedecnum == JEDEC_ATMEL))
10070
10071                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10072                         tp->nvram_pagesize) +
10073                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10074
10075         return addr;
10076 }
10077
/* Read one 32-bit word of NVRAM at byte @offset.  Handles the
 * lock/enable bracketing around the NVRAM command.  Returns 0 on
 * success; *val is written only when the read succeeds.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Devices without an NVRAM interface use the legacy EEPROM
	 * state machine instead.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the device's physical (page-based) addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* NOTE(review): swab32 presumably matches the byte order
	 * produced by the EEPROM path -- confirm against callers.
	 */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10109
10110 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10111 {
10112         int err;
10113         u32 tmp;
10114
10115         err = tg3_nvram_read(tp, offset, &tmp);
10116         *val = swab32(tmp);
10117         return err;
10118 }
10119
/* Write @len bytes at @offset through the legacy EEPROM state machine,
 * one 32-bit word per transaction.  Offset and length are expected to
 * be dword aligned.  Returns 0 on success, -EBUSY on poll timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* Load the data register for this word. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* NOTE(review): writing COMPLETE back appears to clear
		 * the previous transaction's status -- confirm.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Program address and kick off the write. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10161
/* offset and length are dword aligned */
/* Write to an unbuffered flash part: each affected page must be read
 * into a scratch buffer, merged with the new data, erased, then
 * rewritten in full.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the current page contents into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image.
		 * NOTE(review): buf is not advanced between iterations,
		 * which looks suspicious for writes spanning multiple
		 * pages -- confirm against callers' usage.
		 */
		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back, one word at a time, with
		 * FIRST/LAST qualifiers on the page boundaries.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10257
/* offset and length are dword aligned */
/* Write to a buffered flash part (or EEPROM): words are streamed
 * directly, with FIRST/LAST command qualifiers marking page and
 * transfer boundaries.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the device's physical page addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page start or transfer start; LAST at a
		 * page end or transfer end.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST flash on chips other than 5752/5755/5787/5784/5761
		 * needs an explicit write-enable before each FIRST word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10310
/* Top-level NVRAM write entry point: picks the EEPROM, buffered, or
 * unbuffered write path and brackets it with write-enable setup.
 *
 * offset and length are dword aligned.  Returns 0 or a negative error.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Deassert GPIO OUTPUT1 — presumably the board's EEPROM
	 * write-protect line — for the duration of the write.
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		/* No NVRAM interface: drive the EEPROM directly. */
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* NOTE(review): returning here skips the GRC_LOCAL_CTRL
		 * restore below, leaving write-protect deasserted.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes at the GRC level while we work. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the original write-protect state. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
10365
/* Maps a PCI subsystem vendor/device pair to the PHY ID found on that
 * board.  A phy_id of 0 marks a serdes board with no copper PHY (see
 * the PHY_SERDES handling in tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10370
/* Hardcoded per-board PHY IDs, consulted by lookup_by_subsys() when
 * the NVRAM carries no usable eeprom signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10408
10409 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10410 {
10411         int i;
10412
10413         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10414                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10415                      tp->pdev->subsystem_vendor) &&
10416                     (subsys_id_to_phy_id[i].subsys_devid ==
10417                      tp->pdev->subsystem_device))
10418                         return &subsys_id_to_phy_id[i];
10419         }
10420         return NULL;
10421 }
10422
10423 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10424 {
10425         u32 val;
10426         u16 pmcsr;
10427
10428         /* On some early chips the SRAM cannot be accessed in D3hot state,
10429          * so need make sure we're in D0.
10430          */
10431         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10432         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10433         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10434         msleep(1);
10435
10436         /* Make sure register accesses (indirect or otherwise)
10437          * will function correctly.
10438          */
10439         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10440                                tp->misc_host_ctrl);
10441
10442         /* The memory arbiter has to be enabled in order for SRAM accesses
10443          * to succeed.  Normally on powerup the tg3 chip firmware will make
10444          * sure it is enabled, but other entities such as system netboot
10445          * code might disable it.
10446          */
10447         val = tr32(MEMARB_MODE);
10448         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10449
10450         tp->phy_id = PHY_ID_INVALID;
10451         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10452
10453         /* Assume an onboard device and WOL capable by default.  */
10454         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10455
10456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10457                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10458                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10459                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10460                 }
10461                 val = tr32(VCPU_CFGSHDW);
10462                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10463                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10464                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10465                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10466                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10467                 return;
10468         }
10469
10470         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10471         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10472                 u32 nic_cfg, led_cfg;
10473                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10474                 int eeprom_phy_serdes = 0;
10475
10476                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10477                 tp->nic_sram_data_cfg = nic_cfg;
10478
10479                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10480                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10481                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10482                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10483                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10484                     (ver > 0) && (ver < 0x100))
10485                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10486
10487                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10488                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10489                         eeprom_phy_serdes = 1;
10490
10491                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10492                 if (nic_phy_id != 0) {
10493                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10494                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10495
10496                         eeprom_phy_id  = (id1 >> 16) << 10;
10497                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10498                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10499                 } else
10500                         eeprom_phy_id = 0;
10501
10502                 tp->phy_id = eeprom_phy_id;
10503                 if (eeprom_phy_serdes) {
10504                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10505                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10506                         else
10507                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10508                 }
10509
10510                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10511                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10512                                     SHASTA_EXT_LED_MODE_MASK);
10513                 else
10514                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10515
10516                 switch (led_cfg) {
10517                 default:
10518                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10519                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10520                         break;
10521
10522                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10523                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10524                         break;
10525
10526                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10527                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10528
10529                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10530                          * read on some older 5700/5701 bootcode.
10531                          */
10532                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10533                             ASIC_REV_5700 ||
10534                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10535                             ASIC_REV_5701)
10536                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10537
10538                         break;
10539
10540                 case SHASTA_EXT_LED_SHARED:
10541                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10542                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10543                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10544                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10545                                                  LED_CTRL_MODE_PHY_2);
10546                         break;
10547
10548                 case SHASTA_EXT_LED_MAC:
10549                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10550                         break;
10551
10552                 case SHASTA_EXT_LED_COMBO:
10553                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10554                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10555                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10556                                                  LED_CTRL_MODE_PHY_2);
10557                         break;
10558
10559                 };
10560
10561                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10562                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10563                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10564                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10565
10566                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10567                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10568                         if ((tp->pdev->subsystem_vendor ==
10569                              PCI_VENDOR_ID_ARIMA) &&
10570                             (tp->pdev->subsystem_device == 0x205a ||
10571                              tp->pdev->subsystem_device == 0x2063))
10572                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10573                 } else {
10574                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10575                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10576                 }
10577
10578                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10579                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10580                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10581                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10582                 }
10583                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10584                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10585                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10586                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10587                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10588
10589                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10590                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10591                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10592
10593                 if (cfg2 & (1 << 17))
10594                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10595
10596                 /* serdes signal pre-emphasis in register 0x590 set by */
10597                 /* bootcode if bit 18 is set */
10598                 if (cfg2 & (1 << 18))
10599                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10600
10601                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10602                         u32 cfg3;
10603
10604                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10605                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10606                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10607                 }
10608         }
10609 }
10610
/* Determine the PHY ID for this device and bring up the copper PHY's
 * autonegotiation parameters.  The ID is read from the MII registers
 * when that is safe, otherwise taken from NVRAM (set earlier by
 * tg3_get_eeprom_hw_cfg()) or the hardcoded subsystem-ID table.
 * Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack MII PHYSID1/PHYSID2 into the driver's internal
		 * PHY_ID layout (same packing the NVRAM-derived ID uses).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id == 0 in the table also means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY not managed by firmware: reset it and program the
	 * autoneg advertisement.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice; the second read's link status is
		 * what is acted upon.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		/* Restart autoneg only if the PHY is not already
		 * advertising everything in @mask.
		 */
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this runs the 5401 DSP init a second time right
	 * after a successful pass above — looks redundant; confirm the
	 * double write is intentional before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10738
/* Extract the board part number from the VPD (Vital Product Data)
 * area into tp->board_part_number.  VPD is read either directly from
 * NVRAM (when the standard EEPROM magic is present) or through the
 * PCI VPD capability registers.  Any failure falls back to a fixed
 * placeholder string.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100 in standard images. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM image; use the PCI VPD capability instead. */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll the VPD completion flag (bit 15) with a
			 * bounded retry count.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			/* NOTE(review): cpu_to_le32 reads like it was
			 * meant to be le32_to_cpu; both are the same swap
			 * (or no-op), so the stored bytes are identical.
			 */
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip the 0x82 (identifier string) and 0x91 (read-write)
		 * VPD resources; a 16-bit little-endian length follows
		 * each tag byte.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only the 0x90 (read-only) resource carries "PN". */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword list: 2-byte keyword + 1-byte length. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10838
10839 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10840 {
10841         u32 val;
10842
10843         if (tg3_nvram_read_swab(tp, offset, &val) ||
10844             (val & 0xfc000000) != 0x0c000000 ||
10845             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10846             val != 0)
10847                 return 0;
10848
10849         return 1;
10850 }
10851
/* Build tp->fw_ver from NVRAM: first the 16-byte bootcode version
 * string, then ", " plus the ASF firmware version when ASF is
 * enabled (and the APE is not).  Bails out silently on any NVRAM
 * read error, possibly leaving fw_ver partially filled.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	/* Only images with the standard EEPROM magic are parseable. */
	if (val != TG3_EEPROM_MAGIC)
		return;

	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy the 16-byte bootcode version string. */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		if (tg3_nvram_read(tp, offset + i, &val))
			return;

		val = le32_to_cpu(val);
		memcpy(tp->fw_ver + i, &val, 4);
	}

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF init-code entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", <asf-version>" after the bootcode string. */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		val = le32_to_cpu(val);
		offset += sizeof(val);

		/* Truncate if the next dword would overflow fw_ver. */
		if (bcnt > TG3_VER_SIZE - sizeof(val)) {
			memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
		bcnt += sizeof(val);
	}

	/* Guarantee NUL termination regardless of how we got here. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
10935
10936 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10937
10938 static int __devinit tg3_get_invariants(struct tg3 *tp)
10939 {
10940         static struct pci_device_id write_reorder_chipsets[] = {
10941                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10942                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10943                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10944                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10945                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10946                              PCI_DEVICE_ID_VIA_8385_0) },
10947                 { },
10948         };
10949         u32 misc_ctrl_reg;
10950         u32 cacheline_sz_reg;
10951         u32 pci_state_reg, grc_misc_cfg;
10952         u32 val;
10953         u16 pci_cmd;
10954         int err, pcie_cap;
10955
10956         /* Force memory write invalidate off.  If we leave it on,
10957          * then on 5700_BX chips we have to enable a workaround.
10958          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10959          * to match the cacheline size.  The Broadcom driver have this
10960          * workaround but turns MWI off all the times so never uses
10961          * it.  This seems to suggest that the workaround is insufficient.
10962          */
10963         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10964         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10965         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10966
10967         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10968          * has the register indirect write enable bit set before
10969          * we try to access any of the MMIO registers.  It is also
10970          * critical that the PCI-X hw workaround situation is decided
10971          * before that as well.
10972          */
10973         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10974                               &misc_ctrl_reg);
10975
10976         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10977                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10978         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10979                 u32 prod_id_asic_rev;
10980
10981                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10982                                       &prod_id_asic_rev);
10983                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10984         }
10985
10986         /* Wrong chip ID in 5752 A0. This code can be removed later
10987          * as A0 is not in production.
10988          */
10989         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10990                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10991
10992         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10993          * we need to disable memory and use config. cycles
10994          * only to access all registers. The 5702/03 chips
10995          * can mistakenly decode the special cycles from the
10996          * ICH chipsets as memory write cycles, causing corruption
10997          * of register and memory space. Only certain ICH bridges
10998          * will drive special cycles with non-zero data during the
10999          * address phase which can fall within the 5703's address
11000          * range. This is not an ICH bug as the PCI spec allows
11001          * non-zero address during special cycles. However, only
11002          * these ICH bridges are known to drive non-zero addresses
11003          * during special cycles.
11004          *
11005          * Since special cycles do not cross PCI bridges, we only
11006          * enable this workaround if the 5703 is on the secondary
11007          * bus of these ICH bridges.
11008          */
11009         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11010             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11011                 static struct tg3_dev_id {
11012                         u32     vendor;
11013                         u32     device;
11014                         u32     rev;
11015                 } ich_chipsets[] = {
11016                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11017                           PCI_ANY_ID },
11018                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11019                           PCI_ANY_ID },
11020                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11021                           0xa },
11022                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11023                           PCI_ANY_ID },
11024                         { },
11025                 };
11026                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11027                 struct pci_dev *bridge = NULL;
11028
11029                 while (pci_id->vendor != 0) {
11030                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11031                                                 bridge);
11032                         if (!bridge) {
11033                                 pci_id++;
11034                                 continue;
11035                         }
11036                         if (pci_id->rev != PCI_ANY_ID) {
11037                                 if (bridge->revision > pci_id->rev)
11038                                         continue;
11039                         }
11040                         if (bridge->subordinate &&
11041                             (bridge->subordinate->number ==
11042                              tp->pdev->bus->number)) {
11043
11044                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11045                                 pci_dev_put(bridge);
11046                                 break;
11047                         }
11048                 }
11049         }
11050
11051         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11052          * DMA addresses > 40-bit. This bridge may have other additional
11053          * 57xx devices behind it in some 4-port NIC designs for example.
11054          * Any tg3 device found behind the bridge will also need the 40-bit
11055          * DMA workaround.
11056          */
11057         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11058             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11059                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11060                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11061                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11062         }
11063         else {
11064                 struct pci_dev *bridge = NULL;
11065
11066                 do {
11067                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11068                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11069                                                 bridge);
11070                         if (bridge && bridge->subordinate &&
11071                             (bridge->subordinate->number <=
11072                              tp->pdev->bus->number) &&
11073                             (bridge->subordinate->subordinate >=
11074                              tp->pdev->bus->number)) {
11075                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11076                                 pci_dev_put(bridge);
11077                                 break;
11078                         }
11079                 } while (bridge);
11080         }
11081
11082         /* Initialize misc host control in PCI block. */
11083         tp->misc_host_ctrl |= (misc_ctrl_reg &
11084                                MISC_HOST_CTRL_CHIPREV);
11085         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11086                                tp->misc_host_ctrl);
11087
11088         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11089                               &cacheline_sz_reg);
11090
11091         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11092         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11093         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11094         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11095
11096         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11097             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11098                 tp->pdev_peer = tg3_find_peer(tp);
11099
11100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11102             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11106             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11107             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11108                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11109
11110         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11111             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11112                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11113
11114         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11115                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11116                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11117                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11118                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11119                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11120                      tp->pdev_peer == tp->pdev))
11121                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11122
11123                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11124                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11125                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11126                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11127                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11128                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11129                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11130                 } else {
11131                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11132                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11133                                 ASIC_REV_5750 &&
11134                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11135                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11136                 }
11137         }
11138
11139         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11140             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11141             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11142             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11143             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11144             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11145             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11146             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11147                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11148
11149         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11150         if (pcie_cap != 0) {
11151                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11152                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11153                         u16 lnkctl;
11154
11155                         pci_read_config_word(tp->pdev,
11156                                              pcie_cap + PCI_EXP_LNKCTL,
11157                                              &lnkctl);
11158                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11159                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11160                 }
11161         }
11162
11163         /* If we have an AMD 762 or VIA K8T800 chipset, write
11164          * reordering to the mailbox registers done by the host
11165          * controller can cause major troubles.  We read back from
11166          * every mailbox register write to force the writes to be
11167          * posted to the chip in order.
11168          */
11169         if (pci_dev_present(write_reorder_chipsets) &&
11170             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11171                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11172
11173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11174             tp->pci_lat_timer < 64) {
11175                 tp->pci_lat_timer = 64;
11176
11177                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11178                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11179                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11180                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11181
11182                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11183                                        cacheline_sz_reg);
11184         }
11185
11186         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11187             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11188                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11189                 if (!tp->pcix_cap) {
11190                         printk(KERN_ERR PFX "Cannot find PCI-X "
11191                                             "capability, aborting.\n");
11192                         return -EIO;
11193                 }
11194         }
11195
11196         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11197                               &pci_state_reg);
11198
11199         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11200                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11201
11202                 /* If this is a 5700 BX chipset, and we are in PCI-X
11203                  * mode, enable register write workaround.
11204                  *
11205                  * The workaround is to use indirect register accesses
11206                  * for all chip writes not to mailbox registers.
11207                  */
11208                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11209                         u32 pm_reg;
11210
11211                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11212
11213                         /* The chip can have it's power management PCI config
11214                          * space registers clobbered due to this bug.
11215                          * So explicitly force the chip into D0 here.
11216                          */
11217                         pci_read_config_dword(tp->pdev,
11218                                               tp->pm_cap + PCI_PM_CTRL,
11219                                               &pm_reg);
11220                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11221                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11222                         pci_write_config_dword(tp->pdev,
11223                                                tp->pm_cap + PCI_PM_CTRL,
11224                                                pm_reg);
11225
11226                         /* Also, force SERR#/PERR# in PCI command. */
11227                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11228                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11229                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11230                 }
11231         }
11232
11233         /* 5700 BX chips need to have their TX producer index mailboxes
11234          * written twice to workaround a bug.
11235          */
11236         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11237                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11238
11239         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11240                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11241         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11242                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11243
11244         /* Chip-specific fixup from Broadcom driver */
11245         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11246             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11247                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11248                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11249         }
11250
11251         /* Default fast path register access methods */
11252         tp->read32 = tg3_read32;
11253         tp->write32 = tg3_write32;
11254         tp->read32_mbox = tg3_read32;
11255         tp->write32_mbox = tg3_write32;
11256         tp->write32_tx_mbox = tg3_write32;
11257         tp->write32_rx_mbox = tg3_write32;
11258
11259         /* Various workaround register access methods */
11260         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11261                 tp->write32 = tg3_write_indirect_reg32;
11262         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11263                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11264                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11265                 /*
11266                  * Back to back register writes can cause problems on these
11267                  * chips, the workaround is to read back all reg writes
11268                  * except those to mailbox regs.
11269                  *
11270                  * See tg3_write_indirect_reg32().
11271                  */
11272                 tp->write32 = tg3_write_flush_reg32;
11273         }
11274
11275
11276         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11277             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11278                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11279                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11280                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11281         }
11282
11283         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11284                 tp->read32 = tg3_read_indirect_reg32;
11285                 tp->write32 = tg3_write_indirect_reg32;
11286                 tp->read32_mbox = tg3_read_indirect_mbox;
11287                 tp->write32_mbox = tg3_write_indirect_mbox;
11288                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11289                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11290
11291                 iounmap(tp->regs);
11292                 tp->regs = NULL;
11293
11294                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11295                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11296                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11297         }
11298         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11299                 tp->read32_mbox = tg3_read32_mbox_5906;
11300                 tp->write32_mbox = tg3_write32_mbox_5906;
11301                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11302                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11303         }
11304
11305         if (tp->write32 == tg3_write_indirect_reg32 ||
11306             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11307              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11308               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11309                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11310
11311         /* Get eeprom hw config before calling tg3_set_power_state().
11312          * In particular, the TG3_FLG2_IS_NIC flag must be
11313          * determined before calling tg3_set_power_state() so that
11314          * we know whether or not to switch out of Vaux power.
11315          * When the flag is set, it means that GPIO1 is used for eeprom
11316          * write protect and also implies that it is a LOM where GPIOs
11317          * are not used to switch power.
11318          */
11319         tg3_get_eeprom_hw_cfg(tp);
11320
11321         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11322                 /* Allow reads and writes to the
11323                  * APE register and memory space.
11324                  */
11325                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11326                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11327                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11328                                        pci_state_reg);
11329         }
11330
11331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11332             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11333                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11334
11335         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11336          * GPIO1 driven high will bring 5700's external PHY out of reset.
11337          * It is also used as eeprom write protect on LOMs.
11338          */
11339         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11340         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11341             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11342                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11343                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11344         /* Unused GPIO3 must be driven as output on 5752 because there
11345          * are no pull-up resistors on unused GPIO pins.
11346          */
11347         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11348                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11349
11350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11351                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11352
11353         /* Force the chip into D0. */
11354         err = tg3_set_power_state(tp, PCI_D0);
11355         if (err) {
11356                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11357                        pci_name(tp->pdev));
11358                 return err;
11359         }
11360
11361         /* 5700 B0 chips do not support checksumming correctly due
11362          * to hardware bugs.
11363          */
11364         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11365                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11366
11367         /* Derive initial jumbo mode from MTU assigned in
11368          * ether_setup() via the alloc_etherdev() call
11369          */
11370         if (tp->dev->mtu > ETH_DATA_LEN &&
11371             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11372                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11373
11374         /* Determine WakeOnLan speed to use. */
11375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11376             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11377             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11378             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11379                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11380         } else {
11381                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11382         }
11383
11384         /* A few boards don't want Ethernet@WireSpeed phy feature */
11385         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11386             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11387              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11388              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11389             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11390             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11391                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11392
11393         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11394             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11395                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11396         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11397                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11398
11399         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11400                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11401                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11402                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11403                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11404                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11405                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11406                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11407                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11408                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11409                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11410                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11411         }
11412
11413         tp->coalesce_mode = 0;
11414         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11415             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11416                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11417
11418         /* Initialize MAC MI mode, polling disabled. */
11419         tw32_f(MAC_MI_MODE, tp->mi_mode);
11420         udelay(80);
11421
11422         /* Initialize data/descriptor byte/word swapping. */
11423         val = tr32(GRC_MODE);
11424         val &= GRC_MODE_HOST_STACKUP;
11425         tw32(GRC_MODE, val | tp->grc_mode);
11426
11427         tg3_switch_clocks(tp);
11428
11429         /* Clear this out for sanity. */
11430         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11431
11432         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11433                               &pci_state_reg);
11434         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11435             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11436                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11437
11438                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11439                     chiprevid == CHIPREV_ID_5701_B0 ||
11440                     chiprevid == CHIPREV_ID_5701_B2 ||
11441                     chiprevid == CHIPREV_ID_5701_B5) {
11442                         void __iomem *sram_base;
11443
11444                         /* Write some dummy words into the SRAM status block
11445                          * area, see if it reads back correctly.  If the return
11446                          * value is bad, force enable the PCIX workaround.
11447                          */
11448                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11449
11450                         writel(0x00000000, sram_base);
11451                         writel(0x00000000, sram_base + 4);
11452                         writel(0xffffffff, sram_base + 4);
11453                         if (readl(sram_base) != 0x00000000)
11454                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11455                 }
11456         }
11457
11458         udelay(50);
11459         tg3_nvram_init(tp);
11460
11461         grc_misc_cfg = tr32(GRC_MISC_CFG);
11462         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11463
11464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11465             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11466              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11467                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11468
11469         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11470             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11471                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11472         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11473                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11474                                       HOSTCC_MODE_CLRTICK_TXBD);
11475
11476                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11477                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11478                                        tp->misc_host_ctrl);
11479         }
11480
11481         /* these are limited to 10/100 only */
11482         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11483              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11484             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11485              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11486              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11487               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11488               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11489             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11490              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11491               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11492               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11493             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11494                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11495
11496         err = tg3_phy_probe(tp);
11497         if (err) {
11498                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11499                        pci_name(tp->pdev), err);
11500                 /* ... but do not return immediately ... */
11501         }
11502
11503         tg3_read_partno(tp);
11504         tg3_read_fw_ver(tp);
11505
11506         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11507                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11508         } else {
11509                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11510                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11511                 else
11512                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11513         }
11514
11515         /* 5700 {AX,BX} chips have a broken status block link
11516          * change bit implementation, so we must use the
11517          * status register in those cases.
11518          */
11519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11520                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11521         else
11522                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11523
11524         /* The led_ctrl is set during tg3_phy_probe, here we might
11525          * have to force the link status polling mechanism based
11526          * upon subsystem IDs.
11527          */
11528         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11529             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11530             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11531                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11532                                   TG3_FLAG_USE_LINKCHG_REG);
11533         }
11534
11535         /* For all SERDES we poll the MAC status register. */
11536         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11537                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11538         else
11539                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11540
11541         /* All chips before 5787 can get confused if TX buffers
11542          * straddle the 4GB address boundary in some cases.
11543          */
11544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11547             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11548             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11549                 tp->dev->hard_start_xmit = tg3_start_xmit;
11550         else
11551                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11552
11553         tp->rx_offset = 2;
11554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11555             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11556                 tp->rx_offset = 0;
11557
11558         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11559
11560         /* Increment the rx prod index on the rx std ring by at most
11561          * 8 for these chips to workaround hw errata.
11562          */
11563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11566                 tp->rx_std_max_post = 8;
11567
11568         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11569                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11570                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11571
11572         return err;
11573 }
11574
11575 #ifdef CONFIG_SPARC
11576 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11577 {
11578         struct net_device *dev = tp->dev;
11579         struct pci_dev *pdev = tp->pdev;
11580         struct device_node *dp = pci_device_to_OF_node(pdev);
11581         const unsigned char *addr;
11582         int len;
11583
11584         addr = of_get_property(dp, "local-mac-address", &len);
11585         if (addr && len == 6) {
11586                 memcpy(dev->dev_addr, addr, 6);
11587                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11588                 return 0;
11589         }
11590         return -ENODEV;
11591 }
11592
11593 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11594 {
11595         struct net_device *dev = tp->dev;
11596
11597         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11598         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11599         return 0;
11600 }
11601 #endif
11602
11603 static int __devinit tg3_get_device_address(struct tg3 *tp)
11604 {
11605         struct net_device *dev = tp->dev;
11606         u32 hi, lo, mac_offset;
11607         int addr_ok = 0;
11608
11609 #ifdef CONFIG_SPARC
11610         if (!tg3_get_macaddr_sparc(tp))
11611                 return 0;
11612 #endif
11613
11614         mac_offset = 0x7c;
11615         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11616             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11617                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11618                         mac_offset = 0xcc;
11619                 if (tg3_nvram_lock(tp))
11620                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11621                 else
11622                         tg3_nvram_unlock(tp);
11623         }
11624         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11625                 mac_offset = 0x10;
11626
11627         /* First try to get it from MAC address mailbox. */
11628         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11629         if ((hi >> 16) == 0x484b) {
11630                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11631                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11632
11633                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11634                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11635                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11636                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11637                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11638
11639                 /* Some old bootcode may report a 0 MAC address in SRAM */
11640                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11641         }
11642         if (!addr_ok) {
11643                 /* Next, try NVRAM. */
11644                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11645                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11646                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11647                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11648                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11649                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11650                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11651                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11652                 }
11653                 /* Finally just fetch it out of the MAC control regs. */
11654                 else {
11655                         hi = tr32(MAC_ADDR_0_HIGH);
11656                         lo = tr32(MAC_ADDR_0_LOW);
11657
11658                         dev->dev_addr[5] = lo & 0xff;
11659                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11660                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11661                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11662                         dev->dev_addr[1] = hi & 0xff;
11663                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11664                 }
11665         }
11666
11667         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11668 #ifdef CONFIG_SPARC64
11669                 if (!tg3_get_default_macaddr_sparc(tp))
11670                         return 0;
11671 #endif
11672                 return -EINVAL;
11673         }
11674         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11675         return 0;
11676 }
11677
11678 #define BOUNDARY_SINGLE_CACHELINE       1
11679 #define BOUNDARY_MULTI_CACHELINE        2
11680
/* Compute the DMA read/write boundary bits to merge into the
 * TG3PCI_DMA_RW_CTRL value.
 *
 * @tp:  device state (chip revision and bus-mode flags are consulted)
 * @val: current DMA_RW_CTRL value
 *
 * Returns @val with the chosen boundary bits OR'd in (or unchanged if
 * no boundary limiting applies).  The boundary limits how far a DMA
 * burst may cross cache-line boundaries; see the comment mid-function
 * for why some host bridges want this.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in 4-byte units; 0 means the firmware
	 * never programmed it, so assume a large (1024-byte) line.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture boundary policy; goal == 0 means "do not
	 * limit bursts at all" and we bail out below.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary encodings differ from plain PCI. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only write-side boundary control exists. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Conventional PCI: pick the boundary matching the
		 * cache-line size (single-cacheline goal) via the
		 * deliberate fallthrough chain, or a large boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
11817
/* Run one DMA transfer test between host memory and NIC SRAM.
 *
 * @tp:        device state
 * @buf:       host-side test buffer (CPU address; unused here, the
 *             chip DMAs via @buf_dma)
 * @buf_dma:   DMA address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: nonzero = host-to-NIC (read DMA), 0 = NIC-to-host
 *             (write DMA)
 *
 * Builds an internal buffer descriptor, writes it into NIC SRAM via
 * the PCI memory-window config registers, kicks the appropriate DMA
 * FIFO, and polls the completion FIFO for up to 40 * 100us.
 * Returns 0 on completion, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA engine status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor pointing at the host buffer; 0x2100 is the NIC
	 * internal mbuf address used for the test data.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one u32 at a time using the
	 * indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor on the appropriate DMA FIFO to start. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll for completion: the completion FIFO echoes the descriptor
	 * address in its low 16 bits when the transfer finishes.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11898
11899 #define TEST_BUFFER_SIZE        0x2000
11900
/* Choose and program the chip's DMA read/write control settings,
 * then (on 5700/5701 only) run an actual DMA loopback test to detect
 * the write-DMA boundary bug and tighten the boundary if needed.
 *
 * Side effects: sets tp->dma_rwctrl and writes it to
 * TG3PCI_DMA_RW_CTRL.  Allocates (and frees) a temporary coherent
 * DMA buffer of TEST_BUFFER_SIZE bytes.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if the DMA test fails irrecoverably.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes for DMA cycles. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Per-bus / per-chip watermark tuning.  The magic constants
	 * below are chip-specific watermark fields; their exact
	 * meanings come from Broadcom's register documentation.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary/low-order bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the write-DMA boundary loopback test. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to the chip, read it back, verify.
	 * On corruption, retry once with the 16-byte write boundary;
	 * if it is corrupted even then, give up with -ENODEV.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: tighten the write
				 * boundary to 16 bytes and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12087
/* Initialize tp->link_config to its defaults: advertise all 10/100/
 * 1000 modes with autonegotiation enabled, and mark the current and
 * saved ("orig") speed/duplex settings as invalid/unknown.
 */
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	/* "orig" fields hold settings saved across low-power mode. */
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
12105
/* Initialize tp->bufmgr_config with the buffer-manager watermark
 * defaults appropriate for the chip family: 5705-class chips use the
 * _5705 values (with 5906-specific MAC RX overrides), older chips use
 * the original defaults.  Jumbo-frame watermarks are set in both
 * cases.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 has smaller internal buffers; override two of
		 * the 5705 defaults.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are the same on all chips. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
12147
12148 static char * __devinit tg3_phy_string(struct tg3 *tp)
12149 {
12150         switch (tp->phy_id & PHY_ID_MASK) {
12151         case PHY_ID_BCM5400:    return "5400";
12152         case PHY_ID_BCM5401:    return "5401";
12153         case PHY_ID_BCM5411:    return "5411";
12154         case PHY_ID_BCM5701:    return "5701";
12155         case PHY_ID_BCM5703:    return "5703";
12156         case PHY_ID_BCM5704:    return "5704";
12157         case PHY_ID_BCM5705:    return "5705";
12158         case PHY_ID_BCM5750:    return "5750";
12159         case PHY_ID_BCM5752:    return "5752";
12160         case PHY_ID_BCM5714:    return "5714";
12161         case PHY_ID_BCM5780:    return "5780";
12162         case PHY_ID_BCM5755:    return "5755";
12163         case PHY_ID_BCM5787:    return "5787";
12164         case PHY_ID_BCM5784:    return "5784";
12165         case PHY_ID_BCM5756:    return "5722/5756";
12166         case PHY_ID_BCM5906:    return "5906";
12167         case PHY_ID_BCM5761:    return "5761";
12168         case PHY_ID_BCM8002:    return "8002/serdes";
12169         case 0:                 return "serdes";
12170         default:                return "unknown";
12171         };
12172 }
12173
/* Format a human-readable description of the bus the NIC sits on
 * (PCI Express / PCI-X with decoded clock / plain PCI with speed and
 * width) into the caller-supplied buffer @str and return it.
 *
 * NOTE(review): @str is not bounds-checked here; the sole visible
 * intent is a caller buffer large enough for the longest string
 * (e.g. "PCIX:133MHz:64-bit") -- confirm at the call site.
 */
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		/* Low 5 bits of CLOCK_CTRL encode the PCI-X clock. */
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		/* 5704 CIOBE boards run at 133MHz regardless of the
		 * clock_ctrl encoding.
		 */
		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
12209
/* Find the other PCI function of a dual-port device (e.g. 5704):
 * scan all eight functions in this device's slot for a pci_dev that
 * is not @tp->pdev.  Returns the peer, or @tp->pdev itself when the
 * device is configured single-port.
 *
 * NOTE(review): if the loop completes without a break, @peer holds
 * the (already pci_dev_put) result for function 7; the !peer check
 * relies on that final lookup returning NULL in the no-peer case --
 * verify for slots where function 7 exists.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devfn with the function bits masked off = function 0 of slot. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference pci_get_slot took (NULL is a no-op). */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
12237
/* Initialize tp->coal, the ethtool interrupt-coalescing defaults:
 * low-latency tick/frame counts, with adjustments when the host
 * coalescing engine uses CLRTICK mode, and with the per-IRQ and
 * statistics coalescing parameters zeroed on 5705-class chips
 * (which do not support them).
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	/* CLRTICK mode needs the _CLRTCKS variants of the tick values. */
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	/* 5705-class chips lack per-IRQ and stats coalescing. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
12268
12269 static int __devinit tg3_init_one(struct pci_dev *pdev,
12270                                   const struct pci_device_id *ent)
12271 {
12272         static int tg3_version_printed = 0;
12273         unsigned long tg3reg_base, tg3reg_len;
12274         struct net_device *dev;
12275         struct tg3 *tp;
12276         int i, err, pm_cap;
12277         char str[40];
12278         u64 dma_mask, persist_dma_mask;
12279
12280         if (tg3_version_printed++ == 0)
12281                 printk(KERN_INFO "%s", version);
12282
12283         err = pci_enable_device(pdev);
12284         if (err) {
12285                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12286                        "aborting.\n");
12287                 return err;
12288         }
12289
12290         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12291                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12292                        "base address, aborting.\n");
12293                 err = -ENODEV;
12294                 goto err_out_disable_pdev;
12295         }
12296
12297         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12298         if (err) {
12299                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12300                        "aborting.\n");
12301                 goto err_out_disable_pdev;
12302         }
12303
12304         pci_set_master(pdev);
12305
12306         /* Find power-management capability. */
12307         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12308         if (pm_cap == 0) {
12309                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12310                        "aborting.\n");
12311                 err = -EIO;
12312                 goto err_out_free_res;
12313         }
12314
12315         tg3reg_base = pci_resource_start(pdev, 0);
12316         tg3reg_len = pci_resource_len(pdev, 0);
12317
12318         dev = alloc_etherdev(sizeof(*tp));
12319         if (!dev) {
12320                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12321                 err = -ENOMEM;
12322                 goto err_out_free_res;
12323         }
12324
12325         SET_NETDEV_DEV(dev, &pdev->dev);
12326
12327 #if TG3_VLAN_TAG_USED
12328         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12329         dev->vlan_rx_register = tg3_vlan_rx_register;
12330 #endif
12331
12332         tp = netdev_priv(dev);
12333         tp->pdev = pdev;
12334         tp->dev = dev;
12335         tp->pm_cap = pm_cap;
12336         tp->mac_mode = TG3_DEF_MAC_MODE;
12337         tp->rx_mode = TG3_DEF_RX_MODE;
12338         tp->tx_mode = TG3_DEF_TX_MODE;
12339         tp->mi_mode = MAC_MI_MODE_BASE;
12340         if (tg3_debug > 0)
12341                 tp->msg_enable = tg3_debug;
12342         else
12343                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12344
12345         /* The word/byte swap controls here control register access byte
12346          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12347          * setting below.
12348          */
12349         tp->misc_host_ctrl =
12350                 MISC_HOST_CTRL_MASK_PCI_INT |
12351                 MISC_HOST_CTRL_WORD_SWAP |
12352                 MISC_HOST_CTRL_INDIR_ACCESS |
12353                 MISC_HOST_CTRL_PCISTATE_RW;
12354
12355         /* The NONFRM (non-frame) byte/word swap controls take effect
12356          * on descriptor entries, anything which isn't packet data.
12357          *
12358          * The StrongARM chips on the board (one for tx, one for rx)
12359          * are running in big-endian mode.
12360          */
12361         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12362                         GRC_MODE_WSWAP_NONFRM_DATA);
12363 #ifdef __BIG_ENDIAN
12364         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12365 #endif
12366         spin_lock_init(&tp->lock);
12367         spin_lock_init(&tp->indirect_lock);
12368         INIT_WORK(&tp->reset_task, tg3_reset_task);
12369
12370         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12371         if (!tp->regs) {
12372                 printk(KERN_ERR PFX "Cannot map device registers, "
12373                        "aborting.\n");
12374                 err = -ENOMEM;
12375                 goto err_out_free_dev;
12376         }
12377
12378         tg3_init_link_config(tp);
12379
12380         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12381         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12382         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12383
12384         dev->open = tg3_open;
12385         dev->stop = tg3_close;
12386         dev->get_stats = tg3_get_stats;
12387         dev->set_multicast_list = tg3_set_rx_mode;
12388         dev->set_mac_address = tg3_set_mac_addr;
12389         dev->do_ioctl = tg3_ioctl;
12390         dev->tx_timeout = tg3_tx_timeout;
12391         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12392         dev->ethtool_ops = &tg3_ethtool_ops;
12393         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12394         dev->change_mtu = tg3_change_mtu;
12395         dev->irq = pdev->irq;
12396 #ifdef CONFIG_NET_POLL_CONTROLLER
12397         dev->poll_controller = tg3_poll_controller;
12398 #endif
12399
12400         err = tg3_get_invariants(tp);
12401         if (err) {
12402                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12403                        "aborting.\n");
12404                 goto err_out_iounmap;
12405         }
12406
12407         /* The EPB bridge inside 5714, 5715, and 5780 and any
12408          * device behind the EPB cannot support DMA addresses > 40-bit.
12409          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12410          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12411          * do DMA address check in tg3_start_xmit().
12412          */
12413         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12414                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12415         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12416                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12417 #ifdef CONFIG_HIGHMEM
12418                 dma_mask = DMA_64BIT_MASK;
12419 #endif
12420         } else
12421                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12422
12423         /* Configure DMA attributes. */
12424         if (dma_mask > DMA_32BIT_MASK) {
12425                 err = pci_set_dma_mask(pdev, dma_mask);
12426                 if (!err) {
12427                         dev->features |= NETIF_F_HIGHDMA;
12428                         err = pci_set_consistent_dma_mask(pdev,
12429                                                           persist_dma_mask);
12430                         if (err < 0) {
12431                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12432                                        "DMA for consistent allocations\n");
12433                                 goto err_out_iounmap;
12434                         }
12435                 }
12436         }
12437         if (err || dma_mask == DMA_32BIT_MASK) {
12438                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12439                 if (err) {
12440                         printk(KERN_ERR PFX "No usable DMA configuration, "
12441                                "aborting.\n");
12442                         goto err_out_iounmap;
12443                 }
12444         }
12445
12446         tg3_init_bufmgr_config(tp);
12447
12448         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12449                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12450         }
12451         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12452             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12453             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12454             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12455             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12456                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12457         } else {
12458                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12459         }
12460
12461         /* TSO is on by default on chips that support hardware TSO.
12462          * Firmware TSO on older chips gives lower performance, so it
12463          * is off by default, but can be enabled using ethtool.
12464          */
12465         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12466                 dev->features |= NETIF_F_TSO;
12467                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12468                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12469                         dev->features |= NETIF_F_TSO6;
12470                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12471                         dev->features |= NETIF_F_TSO_ECN;
12472         }
12473
12474
12475         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12476             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12477             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12478                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12479                 tp->rx_pending = 63;
12480         }
12481
12482         err = tg3_get_device_address(tp);
12483         if (err) {
12484                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12485                        "aborting.\n");
12486                 goto err_out_iounmap;
12487         }
12488
12489         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12490                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12491                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12492                                "base address for APE, aborting.\n");
12493                         err = -ENODEV;
12494                         goto err_out_iounmap;
12495                 }
12496
12497                 tg3reg_base = pci_resource_start(pdev, 2);
12498                 tg3reg_len = pci_resource_len(pdev, 2);
12499
12500                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12501                 if (tp->aperegs == 0UL) {
12502                         printk(KERN_ERR PFX "Cannot map APE registers, "
12503                                "aborting.\n");
12504                         err = -ENOMEM;
12505                         goto err_out_iounmap;
12506                 }
12507
12508                 tg3_ape_lock_init(tp);
12509         }
12510
12511         /*
12512          * Reset chip in case UNDI or EFI driver did not shutdown
12513          * DMA self test will enable WDMAC and we'll see (spurious)
12514          * pending DMA on the PCI bus at that point.
12515          */
12516         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12517             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12518                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12519                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12520         }
12521
12522         err = tg3_test_dma(tp);
12523         if (err) {
12524                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12525                 goto err_out_apeunmap;
12526         }
12527
12528         /* Tigon3 can do ipv4 only... and some chips have buggy
12529          * checksumming.
12530          */
12531         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12532                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12533                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12534                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12535                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12536                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12537                         dev->features |= NETIF_F_IPV6_CSUM;
12538
12539                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12540         } else
12541                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12542
12543         /* flow control autonegotiation is default behavior */
12544         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12545
12546         tg3_init_coal(tp);
12547
12548         pci_set_drvdata(pdev, dev);
12549
12550         err = register_netdev(dev);
12551         if (err) {
12552                 printk(KERN_ERR PFX "Cannot register net device, "
12553                        "aborting.\n");
12554                 goto err_out_apeunmap;
12555         }
12556
12557         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12558                dev->name,
12559                tp->board_part_number,
12560                tp->pci_chip_rev_id,
12561                tg3_phy_string(tp),
12562                tg3_bus_string(tp, str),
12563                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12564                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12565                  "10/100/1000Base-T")));
12566
12567         for (i = 0; i < 6; i++)
12568                 printk("%2.2x%c", dev->dev_addr[i],
12569                        i == 5 ? '\n' : ':');
12570
12571         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12572                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12573                dev->name,
12574                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12575                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12576                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12577                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12578                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12579                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12580         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12581                dev->name, tp->dma_rwctrl,
12582                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12583                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12584
12585         return 0;
12586
12587 err_out_apeunmap:
12588         if (tp->aperegs) {
12589                 iounmap(tp->aperegs);
12590                 tp->aperegs = NULL;
12591         }
12592
12593 err_out_iounmap:
12594         if (tp->regs) {
12595                 iounmap(tp->regs);
12596                 tp->regs = NULL;
12597         }
12598
12599 err_out_free_dev:
12600         free_netdev(dev);
12601
12602 err_out_free_res:
12603         pci_release_regions(pdev);
12604
12605 err_out_disable_pdev:
12606         pci_disable_device(pdev);
12607         pci_set_drvdata(pdev, NULL);
12608         return err;
12609 }
12610
12611 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12612 {
12613         struct net_device *dev = pci_get_drvdata(pdev);
12614
12615         if (dev) {
12616                 struct tg3 *tp = netdev_priv(dev);
12617
12618                 flush_scheduled_work();
12619                 unregister_netdev(dev);
12620                 if (tp->aperegs) {
12621                         iounmap(tp->aperegs);
12622                         tp->aperegs = NULL;
12623                 }
12624                 if (tp->regs) {
12625                         iounmap(tp->regs);
12626                         tp->regs = NULL;
12627                 }
12628                 free_netdev(dev);
12629                 pci_release_regions(pdev);
12630                 pci_disable_device(pdev);
12631                 pci_set_drvdata(pdev, NULL);
12632         }
12633 }
12634
/* PM suspend hook: quiesce the interface, halt the hardware and move it
 * to the power state requested by the PCI core.  If the power transition
 * fails, the hardware is restarted so the interface remains usable, and
 * the original error is still returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Wait for a queued reset_task before tearing the device down. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Second arg presumably requests irq synchronization before
	 * disabling interrupts -- confirm against tg3_full_lock().
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark init as undone before changing power state. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the hardware back up so
		 * the interface keeps working.  err is left untouched so
		 * the caller still sees the suspend failure.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12686
/* PM resume hook: restore PCI config space, bring the chip back to D0
 * and, if the interface was running at suspend time, restart the
 * hardware, the driver timer and the transmit path.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Undo the pci_save_state() done in tg3_suspend(). */
	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped during suspend. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12721
/* PCI driver glue: binds the IDs in tg3_pci_tbl to the probe/remove
 * and power-management entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12730
12731 static int __init tg3_init(void)
12732 {
12733         return pci_register_driver(&tg3_driver);
12734 }
12735
/* Module exit point: unregister the driver; the PCI core invokes
 * tg3_remove_one() for each bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12740
/* Hook the init/exit routines into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);