BUG_ON() Conversion in drivers/net/
drivers/net/tg3.c (linux-2.6)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.55"
73 #define DRV_MODULE_RELDATE      "Mar 27, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
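/* Editor's note (illustration, not part of the original source): the comment
 * above about replacing '% foo' with '& (foo - 1)' works because the ring
 * sizes are powers of two.  For TG3_TX_RING_SIZE == 512, for example:
 *
 *      (511 + 1) % 512 == 0     and     (511 + 1) & (512 - 1) == 0
 *      (100 + 1) % 512 == 101   and     (100 + 1) & (512 - 1) == 101
 *
 * NEXT_TX() and TX_BUFFS_AVAIL() above rely on exactly this masking, which
 * is why the ring sizes are kept as compile-time constants rather than
 * fields in struct tg3.
 */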
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
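/* Editor's note (illustrative sketch, not part of the original source): this
 * threshold is typically checked against TX_BUFFS_AVAIL() in the transmit
 * completion path, so the queue is only woken once a comfortable number of
 * descriptors is free again, e.g.:
 *
 *      if (netif_queue_stopped(tp->dev) &&
 *          TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 *              netif_wake_queue(tp->dev);
 */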
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
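/* Editor's note (usage example, not part of the original source): tg3_debug
 * is a bitmask of the standard NETIF_MSG_* values; the default of -1 keeps
 * TG3_DEF_MSG_ENABLE above.  Assuming the usual encoding (NETIF_MSG_DRV ==
 * 0x0001, NETIF_MSG_PROBE == 0x0002), loading the module with only driver
 * and probe messages enabled would look like:
 *
 *      modprobe tg3 tg3_debug=0x0003
 */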
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { 0, }
265 };
266
267 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
268
269 static struct {
270         const char string[ETH_GSTRING_LEN];
271 } ethtool_stats_keys[TG3_NUM_STATS] = {
272         { "rx_octets" },
273         { "rx_fragments" },
274         { "rx_ucast_packets" },
275         { "rx_mcast_packets" },
276         { "rx_bcast_packets" },
277         { "rx_fcs_errors" },
278         { "rx_align_errors" },
279         { "rx_xon_pause_rcvd" },
280         { "rx_xoff_pause_rcvd" },
281         { "rx_mac_ctrl_rcvd" },
282         { "rx_xoff_entered" },
283         { "rx_frame_too_long_errors" },
284         { "rx_jabbers" },
285         { "rx_undersize_packets" },
286         { "rx_in_length_errors" },
287         { "rx_out_length_errors" },
288         { "rx_64_or_less_octet_packets" },
289         { "rx_65_to_127_octet_packets" },
290         { "rx_128_to_255_octet_packets" },
291         { "rx_256_to_511_octet_packets" },
292         { "rx_512_to_1023_octet_packets" },
293         { "rx_1024_to_1522_octet_packets" },
294         { "rx_1523_to_2047_octet_packets" },
295         { "rx_2048_to_4095_octet_packets" },
296         { "rx_4096_to_8191_octet_packets" },
297         { "rx_8192_to_9022_octet_packets" },
298
299         { "tx_octets" },
300         { "tx_collisions" },
301
302         { "tx_xon_sent" },
303         { "tx_xoff_sent" },
304         { "tx_flow_control" },
305         { "tx_mac_errors" },
306         { "tx_single_collisions" },
307         { "tx_mult_collisions" },
308         { "tx_deferred" },
309         { "tx_excessive_collisions" },
310         { "tx_late_collisions" },
311         { "tx_collide_2times" },
312         { "tx_collide_3times" },
313         { "tx_collide_4times" },
314         { "tx_collide_5times" },
315         { "tx_collide_6times" },
316         { "tx_collide_7times" },
317         { "tx_collide_8times" },
318         { "tx_collide_9times" },
319         { "tx_collide_10times" },
320         { "tx_collide_11times" },
321         { "tx_collide_12times" },
322         { "tx_collide_13times" },
323         { "tx_collide_14times" },
324         { "tx_collide_15times" },
325         { "tx_ucast_packets" },
326         { "tx_mcast_packets" },
327         { "tx_bcast_packets" },
328         { "tx_carrier_sense_errors" },
329         { "tx_discards" },
330         { "tx_errors" },
331
332         { "dma_writeq_full" },
333         { "dma_write_prioq_full" },
334         { "rxbds_empty" },
335         { "rx_discards" },
336         { "rx_errors" },
337         { "rx_threshold_hit" },
338
339         { "dma_readq_full" },
340         { "dma_read_prioq_full" },
341         { "tx_comp_queue_full" },
342
343         { "ring_set_send_prod_index" },
344         { "ring_status_update" },
345         { "nic_irqs" },
346         { "nic_avoided_irqs" },
347         { "nic_tx_threshold_hit" }
348 };
349
350 static struct {
351         const char string[ETH_GSTRING_LEN];
352 } ethtool_test_keys[TG3_NUM_TEST] = {
353         { "nvram test     (online) " },
354         { "link test      (online) " },
355         { "register test  (offline)" },
356         { "memory test    (offline)" },
357         { "loopback test  (offline)" },
358         { "interrupt test (offline)" },
359 };
360
361 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
362 {
363         writel(val, tp->regs + off);
364 }
365
366 static u32 tg3_read32(struct tg3 *tp, u32 off)
367 {
368         return (readl(tp->regs + off)); 
369 }
370
371 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
372 {
373         unsigned long flags;
374
375         spin_lock_irqsave(&tp->indirect_lock, flags);
376         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
378         spin_unlock_irqrestore(&tp->indirect_lock, flags);
379 }
380
381 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
382 {
383         writel(val, tp->regs + off);
384         readl(tp->regs + off);
385 }
386
387 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
388 {
389         unsigned long flags;
390         u32 val;
391
392         spin_lock_irqsave(&tp->indirect_lock, flags);
393         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
394         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395         spin_unlock_irqrestore(&tp->indirect_lock, flags);
396         return val;
397 }
398
399 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
400 {
401         unsigned long flags;
402
403         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
404                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
405                                        TG3_64BIT_REG_LOW, val);
406                 return;
407         }
408         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
409                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
410                                        TG3_64BIT_REG_LOW, val);
411                 return;
412         }
413
414         spin_lock_irqsave(&tp->indirect_lock, flags);
415         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
417         spin_unlock_irqrestore(&tp->indirect_lock, flags);
418
419         /* In indirect mode when disabling interrupts, we also need
420          * to clear the interrupt bit in the GRC local ctrl register.
421          */
422         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
423             (val == 0x1)) {
424                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
425                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
426         }
427 }
428
429 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
430 {
431         unsigned long flags;
432         u32 val;
433
434         spin_lock_irqsave(&tp->indirect_lock, flags);
435         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
436         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
437         spin_unlock_irqrestore(&tp->indirect_lock, flags);
438         return val;
439 }
440
441 /* usec_wait specifies the wait time in usec when writing to certain registers
442  * where it is unsafe to read back the register without some delay.
443  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
444  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
445  */
446 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
447 {
448         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450                 /* Non-posted methods */
451                 tp->write32(tp, off, val);
452         else {
453                 /* Posted method */
454                 tg3_write32(tp, off, val);
455                 if (usec_wait)
456                         udelay(usec_wait);
457                 tp->read32(tp, off);
458         }
459         /* Wait again after the read for the posted method to guarantee that
460          * the wait time is met.
461          */
462         if (usec_wait)
463                 udelay(usec_wait);
464 }
465
466 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
467 {
468         tp->write32_mbox(tp, off, val);
469         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
470             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471                 tp->read32_mbox(tp, off);
472 }
473
474 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
475 {
476         void __iomem *mbox = tp->regs + off;
477         writel(val, mbox);
478         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
479                 writel(val, mbox);
480         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
481                 readl(mbox);
482 }
483
484 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
485 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
486 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
487 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
488 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
489
490 #define tw32(reg,val)           tp->write32(tp, reg, val)
491 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
492 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
493 #define tr32(reg)               tp->read32(tp, reg)
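/* Editor's note (usage sketch, not part of the original source): registers
 * such as GRC_LOCAL_CTRL need a settle time after being written (see the
 * comment before _tw32_flush() above), so callers later in this file use the
 * _wait_f form, e.g.:
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL,
 *                  tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1, 100);
 *
 * which writes the register, flushes the write with a read-back in the
 * posted case, and delays 100 usec so the hardware has settled before the
 * call returns.
 */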
494
495 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
496 {
497         unsigned long flags;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         if (tp->write32 != tg3_write_indirect_reg32) {
501                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
502                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
503
504                 /* Always leave this as zero. */
505                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
506         } else {
507                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
508                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
509
510                 /* Always leave this as zero. */
511                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
512         }
513         spin_unlock_irqrestore(&tp->indirect_lock, flags);
514 }
515
516 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
517 {
518         unsigned long flags;
519
520         spin_lock_irqsave(&tp->indirect_lock, flags);
521         if (tp->write32 != tg3_write_indirect_reg32) {
522                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
523                 *val = tr32(TG3PCI_MEM_WIN_DATA);
524
525                 /* Always leave this as zero. */
526                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
527         } else {
528                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
529                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
530
531                 /* Always leave this as zero. */
532                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
533         }
534         spin_unlock_irqrestore(&tp->indirect_lock, flags);
535 }
536
537 static void tg3_disable_ints(struct tg3 *tp)
538 {
539         tw32(TG3PCI_MISC_HOST_CTRL,
540              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
541         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
542 }
543
544 static inline void tg3_cond_int(struct tg3 *tp)
545 {
546         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
547             (tp->hw_status->status & SD_STATUS_UPDATED))
548                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
549 }
550
551 static void tg3_enable_ints(struct tg3 *tp)
552 {
553         tp->irq_sync = 0;
554         wmb();
555
556         tw32(TG3PCI_MISC_HOST_CTRL,
557              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
558         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
559                        (tp->last_tag << 24));
560         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
561                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
562                                (tp->last_tag << 24));
563         tg3_cond_int(tp);
564 }
565
566 static inline unsigned int tg3_has_work(struct tg3 *tp)
567 {
568         struct tg3_hw_status *sblk = tp->hw_status;
569         unsigned int work_exists = 0;
570
571         /* check for phy events */
572         if (!(tp->tg3_flags &
573               (TG3_FLAG_USE_LINKCHG_REG |
574                TG3_FLAG_POLL_SERDES))) {
575                 if (sblk->status & SD_STATUS_LINK_CHG)
576                         work_exists = 1;
577         }
578         /* check for RX/TX work to do */
579         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
580             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
581                 work_exists = 1;
582
583         return work_exists;
584 }
585
586 /* tg3_restart_ints
587  *  similar to tg3_enable_ints, but it accurately determines whether there
588  *  is new work pending and can return without flushing the PIO write
589  *  which reenables interrupts 
590  */
591 static void tg3_restart_ints(struct tg3 *tp)
592 {
593         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
594                      tp->last_tag << 24);
595         mmiowb();
596
597         /* When doing tagged status, this work check is unnecessary.
598          * The last_tag we write above tells the chip which piece of
599          * work we've completed.
600          */
601         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
602             tg3_has_work(tp))
603                 tw32(HOSTCC_MODE, tp->coalesce_mode |
604                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
605 }
606
607 static inline void tg3_netif_stop(struct tg3 *tp)
608 {
609         tp->dev->trans_start = jiffies; /* prevent tx timeout */
610         netif_poll_disable(tp->dev);
611         netif_tx_disable(tp->dev);
612 }
613
614 static inline void tg3_netif_start(struct tg3 *tp)
615 {
616         netif_wake_queue(tp->dev);
617         /* NOTE: unconditional netif_wake_queue is only appropriate
618          * so long as all callers are assured to have free tx slots
619          * (such as after tg3_init_hw)
620          */
621         netif_poll_enable(tp->dev);
622         tp->hw_status->status |= SD_STATUS_UPDATED;
623         tg3_enable_ints(tp);
624 }
625
626 static void tg3_switch_clocks(struct tg3 *tp)
627 {
628         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
629         u32 orig_clock_ctrl;
630
631         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
632                 return;
633
634         orig_clock_ctrl = clock_ctrl;
635         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
636                        CLOCK_CTRL_CLKRUN_OENABLE |
637                        0x1f);
638         tp->pci_clock_ctrl = clock_ctrl;
639
640         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
641                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
642                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
643                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
644                 }
645         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
646                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
647                             clock_ctrl |
648                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
649                             40);
650                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
651                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
652                             40);
653         }
654         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
655 }
656
657 #define PHY_BUSY_LOOPS  5000
658
659 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
660 {
661         u32 frame_val;
662         unsigned int loops;
663         int ret;
664
665         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
666                 tw32_f(MAC_MI_MODE,
667                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
668                 udelay(80);
669         }
670
671         *val = 0x0;
672
673         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
674                       MI_COM_PHY_ADDR_MASK);
675         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
676                       MI_COM_REG_ADDR_MASK);
677         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
678         
679         tw32_f(MAC_MI_COM, frame_val);
680
681         loops = PHY_BUSY_LOOPS;
682         while (loops != 0) {
683                 udelay(10);
684                 frame_val = tr32(MAC_MI_COM);
685
686                 if ((frame_val & MI_COM_BUSY) == 0) {
687                         udelay(5);
688                         frame_val = tr32(MAC_MI_COM);
689                         break;
690                 }
691                 loops -= 1;
692         }
693
694         ret = -EBUSY;
695         if (loops != 0) {
696                 *val = frame_val & MI_COM_DATA_MASK;
697                 ret = 0;
698         }
699
700         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
701                 tw32_f(MAC_MI_MODE, tp->mi_mode);
702                 udelay(80);
703         }
704
705         return ret;
706 }
707
708 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
709 {
710         u32 frame_val;
711         unsigned int loops;
712         int ret;
713
714         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715                 tw32_f(MAC_MI_MODE,
716                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717                 udelay(80);
718         }
719
720         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
721                       MI_COM_PHY_ADDR_MASK);
722         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
723                       MI_COM_REG_ADDR_MASK);
724         frame_val |= (val & MI_COM_DATA_MASK);
725         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
726         
727         tw32_f(MAC_MI_COM, frame_val);
728
729         loops = PHY_BUSY_LOOPS;
730         while (loops != 0) {
731                 udelay(10);
732                 frame_val = tr32(MAC_MI_COM);
733                 if ((frame_val & MI_COM_BUSY) == 0) {
734                         udelay(5);
735                         frame_val = tr32(MAC_MI_COM);
736                         break;
737                 }
738                 loops -= 1;
739         }
740
741         ret = -EBUSY;
742         if (loops != 0)
743                 ret = 0;
744
745         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
746                 tw32_f(MAC_MI_MODE, tp->mi_mode);
747                 udelay(80);
748         }
749
750         return ret;
751 }
752
753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
754 {
755         u32 val;
756
757         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
758                 return;
759
760         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
761             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
762                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
763                              (val | (1 << 15) | (1 << 4)));
764 }
765
766 static int tg3_bmcr_reset(struct tg3 *tp)
767 {
768         u32 phy_control;
769         int limit, err;
770
771         /* OK, reset it, and poll the BMCR_RESET bit until it
772          * clears or we time out.
773          */
774         phy_control = BMCR_RESET;
775         err = tg3_writephy(tp, MII_BMCR, phy_control);
776         if (err != 0)
777                 return -EBUSY;
778
779         limit = 5000;
780         while (limit--) {
781                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
782                 if (err != 0)
783                         return -EBUSY;
784
785                 if ((phy_control & BMCR_RESET) == 0) {
786                         udelay(40);
787                         break;
788                 }
789                 udelay(10);
790         }
791         if (limit < 0)          /* only fail if the poll above actually timed out */
792                 return -EBUSY;
793
794         return 0;
795 }
796
797 static int tg3_wait_macro_done(struct tg3 *tp)
798 {
799         int limit = 100;
800
801         while (limit--) {
802                 u32 tmp32;
803
804                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
805                         if ((tmp32 & 0x1000) == 0)
806                                 break;
807                 }
808         }
809         if (limit < 0)          /* only fail if the poll above actually timed out */
810                 return -EBUSY;
811
812         return 0;
813 }
814
815 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
816 {
817         static const u32 test_pat[4][6] = {
818         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
819         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
820         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
821         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
822         };
823         int chan;
824
825         for (chan = 0; chan < 4; chan++) {
826                 int i;
827
828                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829                              (chan * 0x2000) | 0x0200);
830                 tg3_writephy(tp, 0x16, 0x0002);
831
832                 for (i = 0; i < 6; i++)
833                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
834                                      test_pat[chan][i]);
835
836                 tg3_writephy(tp, 0x16, 0x0202);
837                 if (tg3_wait_macro_done(tp)) {
838                         *resetp = 1;
839                         return -EBUSY;
840                 }
841
842                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
843                              (chan * 0x2000) | 0x0200);
844                 tg3_writephy(tp, 0x16, 0x0082);
845                 if (tg3_wait_macro_done(tp)) {
846                         *resetp = 1;
847                         return -EBUSY;
848                 }
849
850                 tg3_writephy(tp, 0x16, 0x0802);
851                 if (tg3_wait_macro_done(tp)) {
852                         *resetp = 1;
853                         return -EBUSY;
854                 }
855
856                 for (i = 0; i < 6; i += 2) {
857                         u32 low, high;
858
859                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
860                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
861                             tg3_wait_macro_done(tp)) {
862                                 *resetp = 1;
863                                 return -EBUSY;
864                         }
865                         low &= 0x7fff;
866                         high &= 0x000f;
867                         if (low != test_pat[chan][i] ||
868                             high != test_pat[chan][i+1]) {
869                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
870                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
871                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
872
873                                 return -EBUSY;
874                         }
875                 }
876         }
877
878         return 0;
879 }
880
881 static int tg3_phy_reset_chanpat(struct tg3 *tp)
882 {
883         int chan;
884
885         for (chan = 0; chan < 4; chan++) {
886                 int i;
887
888                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
889                              (chan * 0x2000) | 0x0200);
890                 tg3_writephy(tp, 0x16, 0x0002);
891                 for (i = 0; i < 6; i++)
892                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
893                 tg3_writephy(tp, 0x16, 0x0202);
894                 if (tg3_wait_macro_done(tp))
895                         return -EBUSY;
896         }
897
898         return 0;
899 }
900
901 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
902 {
903         u32 reg32, phy9_orig;
904         int retries, do_phy_reset, err;
905
906         retries = 10;
907         do_phy_reset = 1;
908         do {
909                 if (do_phy_reset) {
910                         err = tg3_bmcr_reset(tp);
911                         if (err)
912                                 return err;
913                         do_phy_reset = 0;
914                 }
915
916                 /* Disable transmitter and interrupt.  */
917                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
918                         continue;
919
920                 reg32 |= 0x3000;
921                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
922
923                 /* Set full-duplex, 1000 mbps.  */
924                 tg3_writephy(tp, MII_BMCR,
925                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
926
927                 /* Set to master mode.  */
928                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
929                         continue;
930
931                 tg3_writephy(tp, MII_TG3_CTRL,
932                              (MII_TG3_CTRL_AS_MASTER |
933                               MII_TG3_CTRL_ENABLE_AS_MASTER));
934
935                 /* Enable SM_DSP_CLOCK and 6dB.  */
936                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
937
938                 /* Block the PHY control access.  */
939                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
940                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
941
942                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
943                 if (!err)
944                         break;
945         } while (--retries);
946
947         err = tg3_phy_reset_chanpat(tp);
948         if (err)
949                 return err;
950
951         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
952         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
953
954         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
955         tg3_writephy(tp, 0x16, 0x0000);
956
957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
959                 /* Set Extended packet length bit for jumbo frames */
960                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
961         }
962         else {
963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
964         }
965
966         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
967
968         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
969                 reg32 &= ~0x3000;
970                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
971         } else if (!err)
972                 err = -EBUSY;
973
974         return err;
975 }
976
977 /* This will reset the tigon3 PHY unconditionally (the function takes
978  * no "force" argument; callers decide when a reset is needed).
979  */
980 static int tg3_phy_reset(struct tg3 *tp)
981 {
982         u32 phy_status;
983         int err;
984
985         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
986         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
987         if (err != 0)
988                 return -EBUSY;
989
990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
991             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
993                 err = tg3_phy_reset_5703_4_5(tp);
994                 if (err)
995                         return err;
996                 goto out;
997         }
998
999         err = tg3_bmcr_reset(tp);
1000         if (err)
1001                 return err;
1002
1003 out:
1004         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1005                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1006                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1007                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1008                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1009                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1011         }
1012         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1013                 tg3_writephy(tp, 0x1c, 0x8d68);
1014                 tg3_writephy(tp, 0x1c, 0x8d68);
1015         }
1016         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1017                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1018                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1019                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1020                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1021                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1022                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1023                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1024                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025         }
1026         /* Set Extended packet length bit (bit 14) on all chips that */
1027         /* support jumbo frames */
1028         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1029                 /* Cannot do read-modify-write on 5401 */
1030                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1031         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1032                 u32 phy_reg;
1033
1034                 /* Set bit 14 with read-modify-write to preserve other bits */
1035                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1036                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1037                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1038         }
1039
1040         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1041          * jumbo frames transmission.
1042          */
1043         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1044                 u32 phy_reg;
1045
1046                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1047                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1048                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1049         }
1050
1051         tg3_phy_set_wirespeed(tp);
1052         return 0;
1053 }
1054
1055 static void tg3_frob_aux_power(struct tg3 *tp)
1056 {
1057         struct tg3 *tp_peer = tp;
1058
1059         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1060                 return;
1061
1062         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1063             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1064                 struct net_device *dev_peer;
1065
1066                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1067                 /* remove_one() may have been run on the peer. */
1068                 if (!dev_peer)
1069                         tp_peer = tp;
1070                 else
1071                         tp_peer = netdev_priv(dev_peer);
1072         }
1073
1074         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1075             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1076             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1077             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1078                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1079                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1080                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1081                                     (GRC_LCLCTRL_GPIO_OE0 |
1082                                      GRC_LCLCTRL_GPIO_OE1 |
1083                                      GRC_LCLCTRL_GPIO_OE2 |
1084                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1085                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1086                                     100);
1087                 } else {
1088                         u32 no_gpio2;
1089                         u32 grc_local_ctrl = 0;
1090
1091                         if (tp_peer != tp &&
1092                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1093                                 return;
1094
1095                         /* Workaround to prevent overdrawing Amps. */
1096                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1097                             ASIC_REV_5714) {
1098                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1099                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1100                                             grc_local_ctrl, 100);
1101                         }
1102
1103                         /* On 5753 and variants, GPIO2 cannot be used. */
1104                         no_gpio2 = tp->nic_sram_data_cfg &
1105                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1106
1107                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1108                                          GRC_LCLCTRL_GPIO_OE1 |
1109                                          GRC_LCLCTRL_GPIO_OE2 |
1110                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1111                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1112                         if (no_gpio2) {
1113                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1114                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1115                         }
1116                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1117                                                     grc_local_ctrl, 100);
1118
1119                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1120
1121                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1122                                                     grc_local_ctrl, 100);
1123
1124                         if (!no_gpio2) {
1125                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1126                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1127                                             grc_local_ctrl, 100);
1128                         }
1129                 }
1130         } else {
1131                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1132                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1133                         if (tp_peer != tp &&
1134                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1135                                 return;
1136
1137                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1138                                     (GRC_LCLCTRL_GPIO_OE1 |
1139                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1140
1141                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142                                     GRC_LCLCTRL_GPIO_OE1, 100);
1143
1144                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1145                                     (GRC_LCLCTRL_GPIO_OE1 |
1146                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1147                 }
1148         }
1149 }
1150
1151 static int tg3_setup_phy(struct tg3 *, int);
1152
1153 #define RESET_KIND_SHUTDOWN     0
1154 #define RESET_KIND_INIT         1
1155 #define RESET_KIND_SUSPEND      2
1156
1157 static void tg3_write_sig_post_reset(struct tg3 *, int);
1158 static int tg3_halt_cpu(struct tg3 *, u32);
1159 static int tg3_nvram_lock(struct tg3 *);
1160 static void tg3_nvram_unlock(struct tg3 *);
1161
1162 static void tg3_power_down_phy(struct tg3 *tp)
1163 {
1164         /* The PHY should not be powered down on some chips because
1165          * of bugs.
1166          */
1167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1169             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1170              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1171                 return;
1172         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1173 }
1174
1175 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1176 {
1177         u32 misc_host_ctrl;
1178         u16 power_control, power_caps;
1179         int pm = tp->pm_cap;
1180
1181         /* Make sure register accesses (indirect or otherwise)
1182          * will function correctly.
1183          */
1184         pci_write_config_dword(tp->pdev,
1185                                TG3PCI_MISC_HOST_CTRL,
1186                                tp->misc_host_ctrl);
1187
1188         pci_read_config_word(tp->pdev,
1189                              pm + PCI_PM_CTRL,
1190                              &power_control);
1191         power_control |= PCI_PM_CTRL_PME_STATUS;
1192         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1193         switch (state) {
1194         case PCI_D0:
1195                 power_control |= 0;
1196                 pci_write_config_word(tp->pdev,
1197                                       pm + PCI_PM_CTRL,
1198                                       power_control);
1199                 udelay(100);    /* Delay after power state change */
1200
1201                 /* Switch out of Vaux if it is not a LOM */
1202                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1203                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1204
1205                 return 0;
1206
1207         case PCI_D1:
1208                 power_control |= 1;
1209                 break;
1210
1211         case PCI_D2:
1212                 power_control |= 2;
1213                 break;
1214
1215         case PCI_D3hot:
1216                 power_control |= 3;
1217                 break;
1218
1219         default:
1220                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1221                        "requested.\n",
1222                        tp->dev->name, state);
1223                 return -EINVAL;
1224         };
1225
1226         power_control |= PCI_PM_CTRL_PME_ENABLE;
1227
1228         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1229         tw32(TG3PCI_MISC_HOST_CTRL,
1230              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1231
1232         if (tp->link_config.phy_is_low_power == 0) {
1233                 tp->link_config.phy_is_low_power = 1;
1234                 tp->link_config.orig_speed = tp->link_config.speed;
1235                 tp->link_config.orig_duplex = tp->link_config.duplex;
1236                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1237         }
1238
1239         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1240                 tp->link_config.speed = SPEED_10;
1241                 tp->link_config.duplex = DUPLEX_HALF;
1242                 tp->link_config.autoneg = AUTONEG_ENABLE;
1243                 tg3_setup_phy(tp, 0);
1244         }
1245
1246         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1247                 int i;
1248                 u32 val;
1249
1250                 for (i = 0; i < 200; i++) {
1251                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1252                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1253                                 break;
1254                         msleep(1);
1255                 }
1256         }
1257         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1258                                              WOL_DRV_STATE_SHUTDOWN |
1259                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1260
1261         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1262
1263         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1264                 u32 mac_mode;
1265
1266                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1267                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1268                         udelay(40);
1269
1270                         mac_mode = MAC_MODE_PORT_MODE_MII;
1271
1272                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1273                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1274                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1275                 } else {
1276                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1277                 }
1278
1279                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1280                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1281
1282                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1283                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1284                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1285
1286                 tw32_f(MAC_MODE, mac_mode);
1287                 udelay(100);
1288
1289                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1290                 udelay(10);
1291         }
1292
1293         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1294             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1296                 u32 base_val;
1297
1298                 base_val = tp->pci_clock_ctrl;
1299                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1300                              CLOCK_CTRL_TXCLK_DISABLE);
1301
1302                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1303                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1304         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1305                 /* do nothing */
1306         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1307                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1308                 u32 newbits1, newbits2;
1309
1310                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1311                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1312                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1313                                     CLOCK_CTRL_TXCLK_DISABLE |
1314                                     CLOCK_CTRL_ALTCLK);
1315                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1316                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1317                         newbits1 = CLOCK_CTRL_625_CORE;
1318                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1319                 } else {
1320                         newbits1 = CLOCK_CTRL_ALTCLK;
1321                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1322                 }
1323
1324                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1325                             40);
1326
1327                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1328                             40);
1329
1330                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1331                         u32 newbits3;
1332
1333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1334                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1335                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1336                                             CLOCK_CTRL_TXCLK_DISABLE |
1337                                             CLOCK_CTRL_44MHZ_CORE);
1338                         } else {
1339                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1340                         }
1341
1342                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1343                                     tp->pci_clock_ctrl | newbits3, 40);
1344                 }
1345         }
1346
1347         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1348             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1349                 /* Turn off the PHY */
1350                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1351                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1352                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1353                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1354                         tg3_power_down_phy(tp);
1355                 }
1356         }
1357
1358         tg3_frob_aux_power(tp);
1359
1360         /* Workaround for unstable PLL clock */
1361         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1362             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1363                 u32 val = tr32(0x7d00);
1364
1365                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1366                 tw32(0x7d00, val);
1367                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1368                         int err;
1369
1370                         err = tg3_nvram_lock(tp);
1371                         tg3_halt_cpu(tp, RX_CPU_BASE);
1372                         if (!err)
1373                                 tg3_nvram_unlock(tp);
1374                 }
1375         }
1376
1377         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1378
1379         /* Finally, set the new power state. */
1380         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1381         udelay(100);    /* Delay after power state change */
1382
1383         return 0;
1384 }
1385
1386 static void tg3_link_report(struct tg3 *tp)
1387 {
1388         if (!netif_carrier_ok(tp->dev)) {
1389                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1390         } else {
1391                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1392                        tp->dev->name,
1393                        (tp->link_config.active_speed == SPEED_1000 ?
1394                         1000 :
1395                         (tp->link_config.active_speed == SPEED_100 ?
1396                          100 : 10)),
1397                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1398                         "full" : "half"));
1399
1400                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1401                        "%s for RX.\n",
1402                        tp->dev->name,
1403                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1404                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1405         }
1406 }
1407
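/* Resolve TX/RX flow control from the local and remote pause
 * advertisement bits (this appears to follow the usual
 * symmetric/asymmetric PAUSE resolution rules) and then program
 * MAC_RX_MODE/MAC_TX_MODE to match.
 */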
1408 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1409 {
1410         u32 new_tg3_flags = 0;
1411         u32 old_rx_mode = tp->rx_mode;
1412         u32 old_tx_mode = tp->tx_mode;
1413
1414         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1415
1416                 /* Convert 1000BaseX flow control bits to 1000BaseT
1417                  * bits before resolving flow control.
1418                  */
1419                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1420                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1421                                        ADVERTISE_PAUSE_ASYM);
1422                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1423
1424                         if (local_adv & ADVERTISE_1000XPAUSE)
1425                                 local_adv |= ADVERTISE_PAUSE_CAP;
1426                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1427                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1428                         if (remote_adv & LPA_1000XPAUSE)
1429                                 remote_adv |= LPA_PAUSE_CAP;
1430                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1431                                 remote_adv |= LPA_PAUSE_ASYM;
1432                 }
1433
1434                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1435                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1436                                 if (remote_adv & LPA_PAUSE_CAP)
1437                                         new_tg3_flags |=
1438                                                 (TG3_FLAG_RX_PAUSE |
1439                                                 TG3_FLAG_TX_PAUSE);
1440                                 else if (remote_adv & LPA_PAUSE_ASYM)
1441                                         new_tg3_flags |=
1442                                                 (TG3_FLAG_RX_PAUSE);
1443                         } else {
1444                                 if (remote_adv & LPA_PAUSE_CAP)
1445                                         new_tg3_flags |=
1446                                                 (TG3_FLAG_RX_PAUSE |
1447                                                 TG3_FLAG_TX_PAUSE);
1448                         }
1449                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450                         if ((remote_adv & LPA_PAUSE_CAP) &&
1451                         (remote_adv & LPA_PAUSE_ASYM))
1452                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1453                 }
1454
1455                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1456                 tp->tg3_flags |= new_tg3_flags;
1457         } else {
1458                 new_tg3_flags = tp->tg3_flags;
1459         }
1460
1461         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1462                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1463         else
1464                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1465
1466         if (old_rx_mode != tp->rx_mode) {
1467                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1468         }
1469
1470         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1471                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_tx_mode != tp->tx_mode) {
1476                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1477         }
1478 }
1479
1480 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1481 {
1482         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1483         case MII_TG3_AUX_STAT_10HALF:
1484                 *speed = SPEED_10;
1485                 *duplex = DUPLEX_HALF;
1486                 break;
1487
1488         case MII_TG3_AUX_STAT_10FULL:
1489                 *speed = SPEED_10;
1490                 *duplex = DUPLEX_FULL;
1491                 break;
1492
1493         case MII_TG3_AUX_STAT_100HALF:
1494                 *speed = SPEED_100;
1495                 *duplex = DUPLEX_HALF;
1496                 break;
1497
1498         case MII_TG3_AUX_STAT_100FULL:
1499                 *speed = SPEED_100;
1500                 *duplex = DUPLEX_FULL;
1501                 break;
1502
1503         case MII_TG3_AUX_STAT_1000HALF:
1504                 *speed = SPEED_1000;
1505                 *duplex = DUPLEX_HALF;
1506                 break;
1507
1508         case MII_TG3_AUX_STAT_1000FULL:
1509                 *speed = SPEED_1000;
1510                 *duplex = DUPLEX_FULL;
1511                 break;
1512
1513         default:
1514                 *speed = SPEED_INVALID;
1515                 *duplex = DUPLEX_INVALID;
1516                 break;
1517         }
1518 }
1519
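/* Program the copper PHY advertisement registers from link_config:
 * a reduced 10/100 advertisement when entering low power mode,
 * everything the chip supports when no speed has been requested, or a
 * single forced speed/duplex otherwise; autoneg is then restarted or
 * bypassed accordingly.
 */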
1520 static void tg3_phy_copper_begin(struct tg3 *tp)
1521 {
1522         u32 new_adv;
1523         int i;
1524
1525         if (tp->link_config.phy_is_low_power) {
1526                 /* Entering low power mode.  Disable gigabit and
1527                  * 100baseT advertisements.
1528                  */
1529                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1530
1531                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1532                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1533                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1534                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1535
1536                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1537         } else if (tp->link_config.speed == SPEED_INVALID) {
1538                 tp->link_config.advertising =
1539                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1540                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1541                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1542                          ADVERTISED_Autoneg | ADVERTISED_MII);
1543
1544                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1545                         tp->link_config.advertising &=
1546                                 ~(ADVERTISED_1000baseT_Half |
1547                                   ADVERTISED_1000baseT_Full);
1548
1549                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1550                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1551                         new_adv |= ADVERTISE_10HALF;
1552                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1553                         new_adv |= ADVERTISE_10FULL;
1554                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1555                         new_adv |= ADVERTISE_100HALF;
1556                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1557                         new_adv |= ADVERTISE_100FULL;
1558                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1559
1560                 if (tp->link_config.advertising &
1561                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1562                         new_adv = 0;
1563                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1564                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1565                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1566                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1567                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1568                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1569                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1570                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1571                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1572                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1573                 } else {
1574                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1575                 }
1576         } else {
1577                 /* Asking for a specific link mode. */
1578                 if (tp->link_config.speed == SPEED_1000) {
1579                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1580                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581
1582                         if (tp->link_config.duplex == DUPLEX_FULL)
1583                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1584                         else
1585                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1586                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1587                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1588                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1589                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1590                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1591                 } else {
1592                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1593
1594                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595                         if (tp->link_config.speed == SPEED_100) {
1596                                 if (tp->link_config.duplex == DUPLEX_FULL)
1597                                         new_adv |= ADVERTISE_100FULL;
1598                                 else
1599                                         new_adv |= ADVERTISE_100HALF;
1600                         } else {
1601                                 if (tp->link_config.duplex == DUPLEX_FULL)
1602                                         new_adv |= ADVERTISE_10FULL;
1603                                 else
1604                                         new_adv |= ADVERTISE_10HALF;
1605                         }
1606                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1607                 }
1608         }
1609
1610         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1611             tp->link_config.speed != SPEED_INVALID) {
1612                 u32 bmcr, orig_bmcr;
1613
1614                 tp->link_config.active_speed = tp->link_config.speed;
1615                 tp->link_config.active_duplex = tp->link_config.duplex;
1616
1617                 bmcr = 0;
1618                 switch (tp->link_config.speed) {
1619                 default:
1620                 case SPEED_10:
1621                         break;
1622
1623                 case SPEED_100:
1624                         bmcr |= BMCR_SPEED100;
1625                         break;
1626
1627                 case SPEED_1000:
1628                         bmcr |= TG3_BMCR_SPEED1000;
1629                         break;
1630                 }
1631
1632                 if (tp->link_config.duplex == DUPLEX_FULL)
1633                         bmcr |= BMCR_FULLDPLX;
1634
1635                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1636                     (bmcr != orig_bmcr)) {
1637                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1638                         for (i = 0; i < 1500; i++) {
1639                                 u32 tmp;
1640
1641                                 udelay(10);
1642                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1643                                     tg3_readphy(tp, MII_BMSR, &tmp))
1644                                         continue;
1645                                 if (!(tmp & BMSR_LSTATUS)) {
1646                                         udelay(40);
1647                                         break;
1648                                 }
1649                         }
1650                         tg3_writephy(tp, MII_BMCR, bmcr);
1651                         udelay(40);
1652                 }
1653         } else {
1654                 tg3_writephy(tp, MII_BMCR,
1655                              BMCR_ANENABLE | BMCR_ANRESTART);
1656         }
1657 }
1658
1659 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1660 {
1661         int err;
1662
1663         /* Turn off tap power management. */
1664         /* Set Extended packet length bit */
1665         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1666
1667         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1668         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1669
1670         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1671         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1672
1673         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1674         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1675
1676         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1677         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1678
1679         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1680         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1681
1682         udelay(40);
1683
1684         return err;
1685 }
1686
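/* Return 1 only if the PHY is currently advertising every 10/100
 * mode (and every 1000 mode unless the board is 10/100-only); used
 * to decide whether autonegotiation needs to be restarted, e.g. when
 * coming out of low power mode.
 */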
1687 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1688 {
1689         u32 adv_reg, all_mask;
1690
1691         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1692                 return 0;
1693
1694         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1695                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1696         if ((adv_reg & all_mask) != all_mask)
1697                 return 0;
1698         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1699                 u32 tg3_ctrl;
1700
1701                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1702                         return 0;
1703
1704                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1705                             MII_TG3_CTRL_ADV_1000_FULL);
1706                 if ((tg3_ctrl & all_mask) != all_mask)
1707                         return 0;
1708         }
1709         return 1;
1710 }
1711
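/* Bring up the link on a copper PHY: clear pending MAC events,
 * optionally reset the PHY, apply per-chip PHY workarounds, poll BMSR
 * and the aux status register for speed/duplex, resolve flow control,
 * program MAC_MODE and finally report any carrier change.
 */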
1712 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1713 {
1714         int current_link_up;
1715         u32 bmsr, dummy;
1716         u16 current_speed;
1717         u8 current_duplex;
1718         int i, err;
1719
1720         tw32(MAC_EVENT, 0);
1721
1722         tw32_f(MAC_STATUS,
1723              (MAC_STATUS_SYNC_CHANGED |
1724               MAC_STATUS_CFG_CHANGED |
1725               MAC_STATUS_MI_COMPLETION |
1726               MAC_STATUS_LNKSTATE_CHANGED));
1727         udelay(40);
1728
1729         tp->mi_mode = MAC_MI_MODE_BASE;
1730         tw32_f(MAC_MI_MODE, tp->mi_mode);
1731         udelay(80);
1732
1733         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1734
1735         /* Some third-party PHYs need to be reset on link going
1736          * down.
1737          */
1738         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1739              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1740              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1741             netif_carrier_ok(tp->dev)) {
1742                 tg3_readphy(tp, MII_BMSR, &bmsr);
1743                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1744                     !(bmsr & BMSR_LSTATUS))
1745                         force_reset = 1;
1746         }
1747         if (force_reset)
1748                 tg3_phy_reset(tp);
1749
1750         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1751                 tg3_readphy(tp, MII_BMSR, &bmsr);
1752                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1753                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1754                         bmsr = 0;
1755
1756                 if (!(bmsr & BMSR_LSTATUS)) {
1757                         err = tg3_init_5401phy_dsp(tp);
1758                         if (err)
1759                                 return err;
1760
1761                         tg3_readphy(tp, MII_BMSR, &bmsr);
1762                         for (i = 0; i < 1000; i++) {
1763                                 udelay(10);
1764                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1765                                     (bmsr & BMSR_LSTATUS)) {
1766                                         udelay(40);
1767                                         break;
1768                                 }
1769                         }
1770
1771                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1772                             !(bmsr & BMSR_LSTATUS) &&
1773                             tp->link_config.active_speed == SPEED_1000) {
1774                                 err = tg3_phy_reset(tp);
1775                                 if (!err)
1776                                         err = tg3_init_5401phy_dsp(tp);
1777                                 if (err)
1778                                         return err;
1779                         }
1780                 }
1781         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1782                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1783                 /* 5701 {A0,B0} CRC bug workaround */
1784                 tg3_writephy(tp, 0x15, 0x0a75);
1785                 tg3_writephy(tp, 0x1c, 0x8c68);
1786                 tg3_writephy(tp, 0x1c, 0x8d68);
1787                 tg3_writephy(tp, 0x1c, 0x8c68);
1788         }
1789
1790         /* Clear pending interrupts... */
1791         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1792         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1793
1794         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1795                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1796         else
1797                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1798
1799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1801                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1802                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1803                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1804                 else
1805                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1806         }
1807
1808         current_link_up = 0;
1809         current_speed = SPEED_INVALID;
1810         current_duplex = DUPLEX_INVALID;
1811
1812         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1813                 u32 val;
1814
1815                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1816                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1817                 if (!(val & (1 << 10))) {
1818                         val |= (1 << 10);
1819                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1820                         goto relink;
1821                 }
1822         }
1823
1824         bmsr = 0;
1825         for (i = 0; i < 100; i++) {
1826                 tg3_readphy(tp, MII_BMSR, &bmsr);
1827                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1828                     (bmsr & BMSR_LSTATUS))
1829                         break;
1830                 udelay(40);
1831         }
1832
1833         if (bmsr & BMSR_LSTATUS) {
1834                 u32 aux_stat, bmcr;
1835
1836                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1837                 for (i = 0; i < 2000; i++) {
1838                         udelay(10);
1839                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1840                             aux_stat)
1841                                 break;
1842                 }
1843
1844                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1845                                              &current_speed,
1846                                              &current_duplex);
1847
1848                 bmcr = 0;
1849                 for (i = 0; i < 200; i++) {
1850                         tg3_readphy(tp, MII_BMCR, &bmcr);
1851                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1852                                 continue;
1853                         if (bmcr && bmcr != 0x7fff)
1854                                 break;
1855                         udelay(10);
1856                 }
1857
1858                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1859                         if (bmcr & BMCR_ANENABLE) {
1860                                 current_link_up = 1;
1861
1862                                 /* Force autoneg restart if we are exiting
1863                                  * low power mode.
1864                                  */
1865                                 if (!tg3_copper_is_advertising_all(tp))
1866                                         current_link_up = 0;
1867                         } else {
1868                                 current_link_up = 0;
1869                         }
1870                 } else {
1871                         if (!(bmcr & BMCR_ANENABLE) &&
1872                             tp->link_config.speed == current_speed &&
1873                             tp->link_config.duplex == current_duplex) {
1874                                 current_link_up = 1;
1875                         } else {
1876                                 current_link_up = 0;
1877                         }
1878                 }
1879
1880                 tp->link_config.active_speed = current_speed;
1881                 tp->link_config.active_duplex = current_duplex;
1882         }
1883
1884         if (current_link_up == 1 &&
1885             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1886             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1887                 u32 local_adv, remote_adv;
1888
1889                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1890                         local_adv = 0;
1891                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1892
1893                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1894                         remote_adv = 0;
1895
1896                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1897
1898                 /* If we are not advertising full pause capability,
1899                  * something is wrong.  Bring the link down and reconfigure.
1900                  */
1901                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1902                         current_link_up = 0;
1903                 } else {
1904                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1905                 }
1906         }
1907 relink:
1908         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1909                 u32 tmp;
1910
1911                 tg3_phy_copper_begin(tp);
1912
1913                 tg3_readphy(tp, MII_BMSR, &tmp);
1914                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1915                     (tmp & BMSR_LSTATUS))
1916                         current_link_up = 1;
1917         }
1918
1919         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1920         if (current_link_up == 1) {
1921                 if (tp->link_config.active_speed == SPEED_100 ||
1922                     tp->link_config.active_speed == SPEED_10)
1923                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1924                 else
1925                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1926         } else
1927                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1928
1929         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1930         if (tp->link_config.active_duplex == DUPLEX_HALF)
1931                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1932
1933         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1935                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1936                     (current_link_up == 1 &&
1937                      tp->link_config.active_speed == SPEED_10))
1938                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1939         } else {
1940                 if (current_link_up == 1)
1941                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1942         }
1943
1944         /* ??? Without this setting Netgear GA302T PHY does not
1945          * ??? send/receive packets...
1946          */
1947         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1948             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1949                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1950                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1951                 udelay(80);
1952         }
1953
1954         tw32_f(MAC_MODE, tp->mac_mode);
1955         udelay(40);
1956
1957         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1958                 /* Polled via timer. */
1959                 tw32_f(MAC_EVENT, 0);
1960         } else {
1961                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1962         }
1963         udelay(40);
1964
1965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1966             current_link_up == 1 &&
1967             tp->link_config.active_speed == SPEED_1000 &&
1968             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1969              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1970                 udelay(120);
1971                 tw32_f(MAC_STATUS,
1972                      (MAC_STATUS_SYNC_CHANGED |
1973                       MAC_STATUS_CFG_CHANGED));
1974                 udelay(40);
1975                 tg3_write_mem(tp,
1976                               NIC_SRAM_FIRMWARE_MBOX,
1977                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1978         }
1979
1980         if (current_link_up != netif_carrier_ok(tp->dev)) {
1981                 if (current_link_up)
1982                         netif_carrier_on(tp->dev);
1983                 else
1984                         netif_carrier_off(tp->dev);
1985                 tg3_link_report(tp);
1986         }
1987
1988         return 0;
1989 }
1990
1991 struct tg3_fiber_aneginfo {
1992         int state;
1993 #define ANEG_STATE_UNKNOWN              0
1994 #define ANEG_STATE_AN_ENABLE            1
1995 #define ANEG_STATE_RESTART_INIT         2
1996 #define ANEG_STATE_RESTART              3
1997 #define ANEG_STATE_DISABLE_LINK_OK      4
1998 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1999 #define ANEG_STATE_ABILITY_DETECT       6
2000 #define ANEG_STATE_ACK_DETECT_INIT      7
2001 #define ANEG_STATE_ACK_DETECT           8
2002 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2003 #define ANEG_STATE_COMPLETE_ACK         10
2004 #define ANEG_STATE_IDLE_DETECT_INIT     11
2005 #define ANEG_STATE_IDLE_DETECT          12
2006 #define ANEG_STATE_LINK_OK              13
2007 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2008 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2009
2010         u32 flags;
2011 #define MR_AN_ENABLE            0x00000001
2012 #define MR_RESTART_AN           0x00000002
2013 #define MR_AN_COMPLETE          0x00000004
2014 #define MR_PAGE_RX              0x00000008
2015 #define MR_NP_LOADED            0x00000010
2016 #define MR_TOGGLE_TX            0x00000020
2017 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2018 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2019 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2020 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2021 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2022 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2023 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2024 #define MR_TOGGLE_RX            0x00002000
2025 #define MR_NP_RX                0x00004000
2026
2027 #define MR_LINK_OK              0x80000000
2028
2029         unsigned long link_time, cur_time;
2030
2031         u32 ability_match_cfg;
2032         int ability_match_count;
2033
2034         char ability_match, idle_match, ack_match;
2035
2036         u32 txconfig, rxconfig;
2037 #define ANEG_CFG_NP             0x00000080
2038 #define ANEG_CFG_ACK            0x00000040
2039 #define ANEG_CFG_RF2            0x00000020
2040 #define ANEG_CFG_RF1            0x00000010
2041 #define ANEG_CFG_PS2            0x00000001
2042 #define ANEG_CFG_PS1            0x00008000
2043 #define ANEG_CFG_HD             0x00004000
2044 #define ANEG_CFG_FD             0x00002000
2045 #define ANEG_CFG_INVAL          0x00001f06
2046
2047 };
2048 #define ANEG_OK         0
2049 #define ANEG_DONE       1
2050 #define ANEG_TIMER_ENAB 2
2051 #define ANEG_FAILED     -1
2052
2053 #define ANEG_STATE_SETTLE_TIME  10000
2054
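/* Software 1000BASE-X autonegotiation state machine; it appears to
 * mirror the IEEE 802.3 clause 37 arbitration states.  Config words
 * are sampled from MAC_RX_AUTO_NEG and sent via MAC_TX_AUTO_NEG, and
 * the caller ticks the machine until ANEG_DONE or ANEG_FAILED.
 */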
2055 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2056                                    struct tg3_fiber_aneginfo *ap)
2057 {
2058         unsigned long delta;
2059         u32 rx_cfg_reg;
2060         int ret;
2061
2062         if (ap->state == ANEG_STATE_UNKNOWN) {
2063                 ap->rxconfig = 0;
2064                 ap->link_time = 0;
2065                 ap->cur_time = 0;
2066                 ap->ability_match_cfg = 0;
2067                 ap->ability_match_count = 0;
2068                 ap->ability_match = 0;
2069                 ap->idle_match = 0;
2070                 ap->ack_match = 0;
2071         }
2072         ap->cur_time++;
2073
2074         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2075                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2076
2077                 if (rx_cfg_reg != ap->ability_match_cfg) {
2078                         ap->ability_match_cfg = rx_cfg_reg;
2079                         ap->ability_match = 0;
2080                         ap->ability_match_count = 0;
2081                 } else {
2082                         if (++ap->ability_match_count > 1) {
2083                                 ap->ability_match = 1;
2084                                 ap->ability_match_cfg = rx_cfg_reg;
2085                         }
2086                 }
2087                 if (rx_cfg_reg & ANEG_CFG_ACK)
2088                         ap->ack_match = 1;
2089                 else
2090                         ap->ack_match = 0;
2091
2092                 ap->idle_match = 0;
2093         } else {
2094                 ap->idle_match = 1;
2095                 ap->ability_match_cfg = 0;
2096                 ap->ability_match_count = 0;
2097                 ap->ability_match = 0;
2098                 ap->ack_match = 0;
2099
2100                 rx_cfg_reg = 0;
2101         }
2102
2103         ap->rxconfig = rx_cfg_reg;
2104         ret = ANEG_OK;
2105
2106         switch(ap->state) {
2107         case ANEG_STATE_UNKNOWN:
2108                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2109                         ap->state = ANEG_STATE_AN_ENABLE;
2110
2111                 /* fallthru */
2112         case ANEG_STATE_AN_ENABLE:
2113                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2114                 if (ap->flags & MR_AN_ENABLE) {
2115                         ap->link_time = 0;
2116                         ap->cur_time = 0;
2117                         ap->ability_match_cfg = 0;
2118                         ap->ability_match_count = 0;
2119                         ap->ability_match = 0;
2120                         ap->idle_match = 0;
2121                         ap->ack_match = 0;
2122
2123                         ap->state = ANEG_STATE_RESTART_INIT;
2124                 } else {
2125                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2126                 }
2127                 break;
2128
2129         case ANEG_STATE_RESTART_INIT:
2130                 ap->link_time = ap->cur_time;
2131                 ap->flags &= ~(MR_NP_LOADED);
2132                 ap->txconfig = 0;
2133                 tw32(MAC_TX_AUTO_NEG, 0);
2134                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2135                 tw32_f(MAC_MODE, tp->mac_mode);
2136                 udelay(40);
2137
2138                 ret = ANEG_TIMER_ENAB;
2139                 ap->state = ANEG_STATE_RESTART;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_RESTART:
2143                 delta = ap->cur_time - ap->link_time;
2144                 if (delta > ANEG_STATE_SETTLE_TIME) {
2145                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2146                 } else {
2147                         ret = ANEG_TIMER_ENAB;
2148                 }
2149                 break;
2150
2151         case ANEG_STATE_DISABLE_LINK_OK:
2152                 ret = ANEG_DONE;
2153                 break;
2154
2155         case ANEG_STATE_ABILITY_DETECT_INIT:
2156                 ap->flags &= ~(MR_TOGGLE_TX);
2157                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2158                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2159                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2160                 tw32_f(MAC_MODE, tp->mac_mode);
2161                 udelay(40);
2162
2163                 ap->state = ANEG_STATE_ABILITY_DETECT;
2164                 break;
2165
2166         case ANEG_STATE_ABILITY_DETECT:
2167                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2168                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2169                 }
2170                 break;
2171
2172         case ANEG_STATE_ACK_DETECT_INIT:
2173                 ap->txconfig |= ANEG_CFG_ACK;
2174                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2175                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2176                 tw32_f(MAC_MODE, tp->mac_mode);
2177                 udelay(40);
2178
2179                 ap->state = ANEG_STATE_ACK_DETECT;
2180
2181                 /* fallthru */
2182         case ANEG_STATE_ACK_DETECT:
2183                 if (ap->ack_match != 0) {
2184                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2185                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2186                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2187                         } else {
2188                                 ap->state = ANEG_STATE_AN_ENABLE;
2189                         }
2190                 } else if (ap->ability_match != 0 &&
2191                            ap->rxconfig == 0) {
2192                         ap->state = ANEG_STATE_AN_ENABLE;
2193                 }
2194                 break;
2195
2196         case ANEG_STATE_COMPLETE_ACK_INIT:
2197                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2198                         ret = ANEG_FAILED;
2199                         break;
2200                 }
2201                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2202                                MR_LP_ADV_HALF_DUPLEX |
2203                                MR_LP_ADV_SYM_PAUSE |
2204                                MR_LP_ADV_ASYM_PAUSE |
2205                                MR_LP_ADV_REMOTE_FAULT1 |
2206                                MR_LP_ADV_REMOTE_FAULT2 |
2207                                MR_LP_ADV_NEXT_PAGE |
2208                                MR_TOGGLE_RX |
2209                                MR_NP_RX);
2210                 if (ap->rxconfig & ANEG_CFG_FD)
2211                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2212                 if (ap->rxconfig & ANEG_CFG_HD)
2213                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2214                 if (ap->rxconfig & ANEG_CFG_PS1)
2215                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2216                 if (ap->rxconfig & ANEG_CFG_PS2)
2217                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2218                 if (ap->rxconfig & ANEG_CFG_RF1)
2219                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2220                 if (ap->rxconfig & ANEG_CFG_RF2)
2221                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2222                 if (ap->rxconfig & ANEG_CFG_NP)
2223                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2224
2225                 ap->link_time = ap->cur_time;
2226
2227                 ap->flags ^= (MR_TOGGLE_TX);
2228                 if (ap->rxconfig & 0x0008)
2229                         ap->flags |= MR_TOGGLE_RX;
2230                 if (ap->rxconfig & ANEG_CFG_NP)
2231                         ap->flags |= MR_NP_RX;
2232                 ap->flags |= MR_PAGE_RX;
2233
2234                 ap->state = ANEG_STATE_COMPLETE_ACK;
2235                 ret = ANEG_TIMER_ENAB;
2236                 break;
2237
2238         case ANEG_STATE_COMPLETE_ACK:
2239                 if (ap->ability_match != 0 &&
2240                     ap->rxconfig == 0) {
2241                         ap->state = ANEG_STATE_AN_ENABLE;
2242                         break;
2243                 }
2244                 delta = ap->cur_time - ap->link_time;
2245                 if (delta > ANEG_STATE_SETTLE_TIME) {
2246                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2247                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2248                         } else {
2249                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2250                                     !(ap->flags & MR_NP_RX)) {
2251                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2252                                 } else {
2253                                         ret = ANEG_FAILED;
2254                                 }
2255                         }
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_IDLE_DETECT_INIT:
2260                 ap->link_time = ap->cur_time;
2261                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2262                 tw32_f(MAC_MODE, tp->mac_mode);
2263                 udelay(40);
2264
2265                 ap->state = ANEG_STATE_IDLE_DETECT;
2266                 ret = ANEG_TIMER_ENAB;
2267                 break;
2268
2269         case ANEG_STATE_IDLE_DETECT:
2270                 if (ap->ability_match != 0 &&
2271                     ap->rxconfig == 0) {
2272                         ap->state = ANEG_STATE_AN_ENABLE;
2273                         break;
2274                 }
2275                 delta = ap->cur_time - ap->link_time;
2276                 if (delta > ANEG_STATE_SETTLE_TIME) {
2277                         /* XXX another gem from the Broadcom driver :( */
2278                         ap->state = ANEG_STATE_LINK_OK;
2279                 }
2280                 break;
2281
2282         case ANEG_STATE_LINK_OK:
2283                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2284                 ret = ANEG_DONE;
2285                 break;
2286
2287         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2288                 /* ??? unimplemented */
2289                 break;
2290
2291         case ANEG_STATE_NEXT_PAGE_WAIT:
2292                 /* ??? unimplemented */
2293                 break;
2294
2295         default:
2296                 ret = ANEG_FAILED;
2297                 break;
2298         }
2299
2300         return ret;
2301 }
2302
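/* Run the software autoneg state machine, ticking it roughly once per
 * microsecond for up to ~195 ms.  Returns nonzero if the machine
 * reached ANEG_DONE and reported negotiation complete, link OK or a
 * full-duplex link partner.
 */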
2303 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2304 {
2305         int res = 0;
2306         struct tg3_fiber_aneginfo aninfo;
2307         int status = ANEG_FAILED;
2308         unsigned int tick;
2309         u32 tmp;
2310
2311         tw32_f(MAC_TX_AUTO_NEG, 0);
2312
2313         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2314         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2315         udelay(40);
2316
2317         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2318         udelay(40);
2319
2320         memset(&aninfo, 0, sizeof(aninfo));
2321         aninfo.flags |= MR_AN_ENABLE;
2322         aninfo.state = ANEG_STATE_UNKNOWN;
2323         aninfo.cur_time = 0;
2324         tick = 0;
2325         while (++tick < 195000) {
2326                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2327                 if (status == ANEG_DONE || status == ANEG_FAILED)
2328                         break;
2329
2330                 udelay(1);
2331         }
2332
2333         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2334         tw32_f(MAC_MODE, tp->mac_mode);
2335         udelay(40);
2336
2337         *flags = aninfo.flags;
2338
2339         if (status == ANEG_DONE &&
2340             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2341                              MR_LP_ADV_FULL_DUPLEX)))
2342                 res = 1;
2343
2344         return res;
2345 }
2346
2347 static void tg3_init_bcm8002(struct tg3 *tp)
2348 {
2349         u32 mac_status = tr32(MAC_STATUS);
2350         int i;
2351
2352         /* Reset when initializing for the first time or when we have a link. */
2353         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2354             !(mac_status & MAC_STATUS_PCS_SYNCED))
2355                 return;
2356
2357         /* Set PLL lock range. */
2358         tg3_writephy(tp, 0x16, 0x8007);
2359
2360         /* SW reset */
2361         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2362
2363         /* Wait for reset to complete. */
2364         /* XXX schedule_timeout() ... */
2365         for (i = 0; i < 500; i++)
2366                 udelay(10);
2367
2368         /* Config mode; select PMA/Ch 1 regs. */
2369         tg3_writephy(tp, 0x10, 0x8411);
2370
2371         /* Enable auto-lock and comdet, select txclk for tx. */
2372         tg3_writephy(tp, 0x11, 0x0a10);
2373
2374         tg3_writephy(tp, 0x18, 0x00a0);
2375         tg3_writephy(tp, 0x16, 0x41ff);
2376
2377         /* Assert and deassert POR. */
2378         tg3_writephy(tp, 0x13, 0x0400);
2379         udelay(40);
2380         tg3_writephy(tp, 0x13, 0x0000);
2381
2382         tg3_writephy(tp, 0x11, 0x0a50);
2383         udelay(40);
2384         tg3_writephy(tp, 0x11, 0x0a10);
2385
2386         /* Wait for signal to stabilize */
2387         /* XXX schedule_timeout() ... */
2388         for (i = 0; i < 15000; i++)
2389                 udelay(10);
2390
2391         /* Deselect the channel register so we can read the PHYID
2392          * later.
2393          */
2394         tg3_writephy(tp, 0x10, 0x8011);
2395 }
2396
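/* Fiber link setup using the hardware SG_DIG autoneg block: handles
 * forced mode, normal autonegotiation with pause advertisement and,
 * when no config words are received, parallel detection.  Returns
 * nonzero when the link is up.
 */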
2397 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2398 {
2399         u32 sg_dig_ctrl, sg_dig_status;
2400         u32 serdes_cfg, expected_sg_dig_ctrl;
2401         int workaround, port_a;
2402         int current_link_up;
2403
2404         serdes_cfg = 0;
2405         expected_sg_dig_ctrl = 0;
2406         workaround = 0;
2407         port_a = 1;
2408         current_link_up = 0;
2409
2410         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2411             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2412                 workaround = 1;
2413                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2414                         port_a = 0;
2415
2416                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2417                 /* preserve bits 20-23 for voltage regulator */
2418                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2419         }
2420
2421         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2422
2423         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2424                 if (sg_dig_ctrl & (1 << 31)) {
2425                         if (workaround) {
2426                                 u32 val = serdes_cfg;
2427
2428                                 if (port_a)
2429                                         val |= 0xc010000;
2430                                 else
2431                                         val |= 0x4010000;
2432                                 tw32_f(MAC_SERDES_CFG, val);
2433                         }
2434                         tw32_f(SG_DIG_CTRL, 0x01388400);
2435                 }
2436                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2437                         tg3_setup_flow_control(tp, 0, 0);
2438                         current_link_up = 1;
2439                 }
2440                 goto out;
2441         }
2442
2443         /* Want auto-negotiation.  */
2444         expected_sg_dig_ctrl = 0x81388400;
2445
2446         /* Pause capability */
2447         expected_sg_dig_ctrl |= (1 << 11);
2448
2449                 /* Asymmetric pause */
2450         expected_sg_dig_ctrl |= (1 << 12);
2451
2452         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2453                 if (workaround)
2454                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2455                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2456                 udelay(5);
2457                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2458
2459                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2460         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2461                                  MAC_STATUS_SIGNAL_DET)) {
2462                 int i;
2463
2464                 /* Give time to negotiate (~200ms) */
2465                 for (i = 0; i < 40000; i++) {
2466                         sg_dig_status = tr32(SG_DIG_STATUS);
2467                         if (sg_dig_status & (0x3))
2468                                 break;
2469                         udelay(5);
2470                 }
2471                 mac_status = tr32(MAC_STATUS);
2472
2473                 if ((sg_dig_status & (1 << 1)) &&
2474                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2475                         u32 local_adv, remote_adv;
2476
2477                         local_adv = ADVERTISE_PAUSE_CAP;
2478                         remote_adv = 0;
2479                         if (sg_dig_status & (1 << 19))
2480                                 remote_adv |= LPA_PAUSE_CAP;
2481                         if (sg_dig_status & (1 << 20))
2482                                 remote_adv |= LPA_PAUSE_ASYM;
2483
2484                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2485                         current_link_up = 1;
2486                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2487                 } else if (!(sg_dig_status & (1 << 1))) {
2488                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2489                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2490                         else {
2491                                 if (workaround) {
2492                                         u32 val = serdes_cfg;
2493
2494                                         if (port_a)
2495                                                 val |= 0xc010000;
2496                                         else
2497                                                 val |= 0x4010000;
2498
2499                                         tw32_f(MAC_SERDES_CFG, val);
2500                                 }
2501
2502                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2503                                 udelay(40);
2504
2505                                 /* Link parallel detection - link is up
2506                                  * only if we have PCS_SYNC and not
2507                                  * receiving config code words. */
2508                                 mac_status = tr32(MAC_STATUS);
2509                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2510                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2511                                         tg3_setup_flow_control(tp, 0, 0);
2512                                         current_link_up = 1;
2513                                 }
2514                         }
2515                 }
2516         }
2517
2518 out:
2519         return current_link_up;
2520 }
2521
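/* Fiber link setup without the SG_DIG block: either run the software
 * autoneg state machine via fiber_autoneg() or force a 1000FD link,
 * then wait for the MAC status bits to settle.  Returns nonzero when
 * the link is up.
 */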
2522 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2523 {
2524         int current_link_up = 0;
2525
2526         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2527                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2528                 goto out;
2529         }
2530
2531         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2532                 u32 flags;
2533                 int i;
2534
2535                 if (fiber_autoneg(tp, &flags)) {
2536                         u32 local_adv, remote_adv;
2537
2538                         local_adv = ADVERTISE_PAUSE_CAP;
2539                         remote_adv = 0;
2540                         if (flags & MR_LP_ADV_SYM_PAUSE)
2541                                 remote_adv |= LPA_PAUSE_CAP;
2542                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2543                                 remote_adv |= LPA_PAUSE_ASYM;
2544
2545                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2546
2547                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2548                         current_link_up = 1;
2549                 }
2550                 for (i = 0; i < 30; i++) {
2551                         udelay(20);
2552                         tw32_f(MAC_STATUS,
2553                                (MAC_STATUS_SYNC_CHANGED |
2554                                 MAC_STATUS_CFG_CHANGED));
2555                         udelay(40);
2556                         if ((tr32(MAC_STATUS) &
2557                              (MAC_STATUS_SYNC_CHANGED |
2558                               MAC_STATUS_CFG_CHANGED)) == 0)
2559                                 break;
2560                 }
2561
2562                 mac_status = tr32(MAC_STATUS);
2563                 if (current_link_up == 0 &&
2564                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2565                     !(mac_status & MAC_STATUS_RCVD_CFG))
2566                         current_link_up = 1;
2567         } else {
2568                 /* Forcing 1000FD link up. */
2569                 current_link_up = 1;
2570                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2571
2572                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2573                 udelay(40);
2574         }
2575
2576 out:
2577         return current_link_up;
2578 }
2579
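/* Top-level link setup for TBI/fiber boards: selects hardware or
 * software autonegotiation, updates the link LED and carrier state,
 * and reports any change in speed, duplex or pause configuration.
 */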
2580 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2581 {
2582         u32 orig_pause_cfg;
2583         u16 orig_active_speed;
2584         u8 orig_active_duplex;
2585         u32 mac_status;
2586         int current_link_up;
2587         int i;
2588
2589         orig_pause_cfg =
2590                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2591                                   TG3_FLAG_TX_PAUSE));
2592         orig_active_speed = tp->link_config.active_speed;
2593         orig_active_duplex = tp->link_config.active_duplex;
2594
2595         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2596             netif_carrier_ok(tp->dev) &&
2597             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2598                 mac_status = tr32(MAC_STATUS);
2599                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2600                                MAC_STATUS_SIGNAL_DET |
2601                                MAC_STATUS_CFG_CHANGED |
2602                                MAC_STATUS_RCVD_CFG);
2603                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2604                                    MAC_STATUS_SIGNAL_DET)) {
2605                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2606                                             MAC_STATUS_CFG_CHANGED));
2607                         return 0;
2608                 }
2609         }
2610
2611         tw32_f(MAC_TX_AUTO_NEG, 0);
2612
2613         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2614         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2615         tw32_f(MAC_MODE, tp->mac_mode);
2616         udelay(40);
2617
2618         if (tp->phy_id == PHY_ID_BCM8002)
2619                 tg3_init_bcm8002(tp);
2620
2621         /* Enable link change events even when serdes polling is used. */
2622         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2623         udelay(40);
2624
2625         current_link_up = 0;
2626         mac_status = tr32(MAC_STATUS);
2627
2628         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2629                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2630         else
2631                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2632
2633         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2634         tw32_f(MAC_MODE, tp->mac_mode);
2635         udelay(40);
2636
2637         tp->hw_status->status =
2638                 (SD_STATUS_UPDATED |
2639                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2640
2641         for (i = 0; i < 100; i++) {
2642                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2643                                     MAC_STATUS_CFG_CHANGED));
2644                 udelay(5);
2645                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2646                                          MAC_STATUS_CFG_CHANGED)) == 0)
2647                         break;
2648         }
2649
2650         mac_status = tr32(MAC_STATUS);
2651         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2652                 current_link_up = 0;
2653                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2654                         tw32_f(MAC_MODE, (tp->mac_mode |
2655                                           MAC_MODE_SEND_CONFIGS));
2656                         udelay(1);
2657                         tw32_f(MAC_MODE, tp->mac_mode);
2658                 }
2659         }
2660
2661         if (current_link_up == 1) {
2662                 tp->link_config.active_speed = SPEED_1000;
2663                 tp->link_config.active_duplex = DUPLEX_FULL;
2664                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2665                                     LED_CTRL_LNKLED_OVERRIDE |
2666                                     LED_CTRL_1000MBPS_ON));
2667         } else {
2668                 tp->link_config.active_speed = SPEED_INVALID;
2669                 tp->link_config.active_duplex = DUPLEX_INVALID;
2670                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2671                                     LED_CTRL_LNKLED_OVERRIDE |
2672                                     LED_CTRL_TRAFFIC_OVERRIDE));
2673         }
2674
2675         if (current_link_up != netif_carrier_ok(tp->dev)) {
2676                 if (current_link_up)
2677                         netif_carrier_on(tp->dev);
2678                 else
2679                         netif_carrier_off(tp->dev);
2680                 tg3_link_report(tp);
2681         } else {
2682                 u32 now_pause_cfg =
2683                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2684                                          TG3_FLAG_TX_PAUSE);
2685                 if (orig_pause_cfg != now_pause_cfg ||
2686                     orig_active_speed != tp->link_config.active_speed ||
2687                     orig_active_duplex != tp->link_config.active_duplex)
2688                         tg3_link_report(tp);
2689         }
2690
2691         return 0;
2692 }
2693
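/* Link setup for serdes PHYs that are programmed through the MII
 * registers (e.g. 5714-class parts): the 1000BASE-X advertisement
 * bits are used instead of the copper ones, and on 5714 the BMSR link
 * status is overridden by MAC_TX_STATUS.
 */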
2694 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2695 {
2696         int current_link_up, err = 0;
2697         u32 bmsr, bmcr;
2698         u16 current_speed;
2699         u8 current_duplex;
2700
2701         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2702         tw32_f(MAC_MODE, tp->mac_mode);
2703         udelay(40);
2704
2705         tw32(MAC_EVENT, 0);
2706
2707         tw32_f(MAC_STATUS,
2708              (MAC_STATUS_SYNC_CHANGED |
2709               MAC_STATUS_CFG_CHANGED |
2710               MAC_STATUS_MI_COMPLETION |
2711               MAC_STATUS_LNKSTATE_CHANGED));
2712         udelay(40);
2713
2714         if (force_reset)
2715                 tg3_phy_reset(tp);
2716
2717         current_link_up = 0;
2718         current_speed = SPEED_INVALID;
2719         current_duplex = DUPLEX_INVALID;
2720
2721         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2722         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2724                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2725                         bmsr |= BMSR_LSTATUS;
2726                 else
2727                         bmsr &= ~BMSR_LSTATUS;
2728         }
2729
2730         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2731
2732         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2733             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2734                 /* do nothing, just check for link up at the end */
2735         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2736                 u32 adv, new_adv;
2737
2738                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2739                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2740                                   ADVERTISE_1000XPAUSE |
2741                                   ADVERTISE_1000XPSE_ASYM |
2742                                   ADVERTISE_SLCT);
2743
2744                 /* Always advertise symmetric PAUSE just like copper */
2745                 new_adv |= ADVERTISE_1000XPAUSE;
2746
2747                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2748                         new_adv |= ADVERTISE_1000XHALF;
2749                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2750                         new_adv |= ADVERTISE_1000XFULL;
2751
2752                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2753                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2754                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2755                         tg3_writephy(tp, MII_BMCR, bmcr);
2756
2757                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2758                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2759                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2760
2761                         return err;
2762                 }
2763         } else {
2764                 u32 new_bmcr;
2765
2766                 bmcr &= ~BMCR_SPEED1000;
2767                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2768
2769                 if (tp->link_config.duplex == DUPLEX_FULL)
2770                         new_bmcr |= BMCR_FULLDPLX;
2771
2772                 if (new_bmcr != bmcr) {
2773                         /* BMCR_SPEED1000 is a reserved bit that needs
2774                          * to be set on write.
2775                          */
2776                         new_bmcr |= BMCR_SPEED1000;
2777
2778                         /* Force a linkdown */
2779                         if (netif_carrier_ok(tp->dev)) {
2780                                 u32 adv;
2781
2782                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2783                                 adv &= ~(ADVERTISE_1000XFULL |
2784                                          ADVERTISE_1000XHALF |
2785                                          ADVERTISE_SLCT);
2786                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2787                                 tg3_writephy(tp, MII_BMCR, bmcr |
2788                                                            BMCR_ANRESTART |
2789                                                            BMCR_ANENABLE);
2790                                 udelay(10);
2791                                 netif_carrier_off(tp->dev);
2792                         }
2793                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2794                         bmcr = new_bmcr;
2795                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2796                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2797                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2798                             ASIC_REV_5714) {
2799                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2800                                         bmsr |= BMSR_LSTATUS;
2801                                 else
2802                                         bmsr &= ~BMSR_LSTATUS;
2803                         }
2804                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2805                 }
2806         }
2807
2808         if (bmsr & BMSR_LSTATUS) {
2809                 current_speed = SPEED_1000;
2810                 current_link_up = 1;
2811                 if (bmcr & BMCR_FULLDPLX)
2812                         current_duplex = DUPLEX_FULL;
2813                 else
2814                         current_duplex = DUPLEX_HALF;
2815
2816                 if (bmcr & BMCR_ANENABLE) {
2817                         u32 local_adv, remote_adv, common;
2818
2819                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2820                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2821                         common = local_adv & remote_adv;
2822                         if (common & (ADVERTISE_1000XHALF |
2823                                       ADVERTISE_1000XFULL)) {
2824                                 if (common & ADVERTISE_1000XFULL)
2825                                         current_duplex = DUPLEX_FULL;
2826                                 else
2827                                         current_duplex = DUPLEX_HALF;
2828
2829                                 tg3_setup_flow_control(tp, local_adv,
2830                                                        remote_adv);
2831                         }
2832                         else
2833                                 current_link_up = 0;
2834                 }
2835         }
2836
2837         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2838         if (tp->link_config.active_duplex == DUPLEX_HALF)
2839                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2840
2841         tw32_f(MAC_MODE, tp->mac_mode);
2842         udelay(40);
2843
2844         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2845
2846         tp->link_config.active_speed = current_speed;
2847         tp->link_config.active_duplex = current_duplex;
2848
2849         if (current_link_up != netif_carrier_ok(tp->dev)) {
2850                 if (current_link_up)
2851                         netif_carrier_on(tp->dev);
2852                 else {
2853                         netif_carrier_off(tp->dev);
2854                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2855                 }
2856                 tg3_link_report(tp);
2857         }
2858         return err;
2859 }
2860
2861 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2862 {
2863         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2864                 /* Give autoneg time to complete. */
2865                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2866                 return;
2867         }
2868         if (!netif_carrier_ok(tp->dev) &&
2869             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2870                 u32 bmcr;
2871
2872                 tg3_readphy(tp, MII_BMCR, &bmcr);
2873                 if (bmcr & BMCR_ANENABLE) {
2874                         u32 phy1, phy2;
2875
2876                         /* Select shadow register 0x1f */
2877                         tg3_writephy(tp, 0x1c, 0x7c00);
2878                         tg3_readphy(tp, 0x1c, &phy1);
2879
2880                         /* Select expansion interrupt status register */
2881                         tg3_writephy(tp, 0x17, 0x0f01);
2882                         tg3_readphy(tp, 0x15, &phy2);
2883                         tg3_readphy(tp, 0x15, &phy2);
2884
2885                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2886                                 /* We have signal detect and not receiving
2887                                  * config code words, link is up by parallel
2888                                  * detection.
2889                                  */
2890
2891                                 bmcr &= ~BMCR_ANENABLE;
2892                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2893                                 tg3_writephy(tp, MII_BMCR, bmcr);
2894                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2895                         }
2896                 }
2897         }
2898         else if (netif_carrier_ok(tp->dev) &&
2899                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2900                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2901                 u32 phy2;
2902
2903                 /* Select expansion interrupt status register */
2904                 tg3_writephy(tp, 0x17, 0x0f01);
2905                 tg3_readphy(tp, 0x15, &phy2);
2906                 if (phy2 & 0x20) {
2907                         u32 bmcr;
2908
2909                         /* Config code words received, turn on autoneg. */
2910                         tg3_readphy(tp, MII_BMCR, &bmcr);
2911                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2912
2913                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2914
2915                 }
2916         }
2917 }
2918
2919 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2920 {
2921         int err;
2922
2923         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2924                 err = tg3_setup_fiber_phy(tp, force_reset);
2925         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2926                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2927         } else {
2928                 err = tg3_setup_copper_phy(tp, force_reset);
2929         }
2930
2931         if (tp->link_config.active_speed == SPEED_1000 &&
2932             tp->link_config.active_duplex == DUPLEX_HALF)
2933                 tw32(MAC_TX_LENGTHS,
2934                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2935                       (6 << TX_LENGTHS_IPG_SHIFT) |
2936                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2937         else
2938                 tw32(MAC_TX_LENGTHS,
2939                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2940                       (6 << TX_LENGTHS_IPG_SHIFT) |
2941                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2942
2943         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2944                 if (netif_carrier_ok(tp->dev)) {
2945                         tw32(HOSTCC_STAT_COAL_TICKS,
2946                              tp->coal.stats_block_coalesce_usecs);
2947                 } else {
2948                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2949                 }
2950         }
2951
2952         return err;
2953 }
2954
2955 /* Tigon3 never reports partial packet sends.  So we do not
2956  * need special logic to handle SKBs that have not had all
2957  * of their frags sent yet, like SunGEM does.
2958  */
2959 static void tg3_tx(struct tg3 *tp)
2960 {
2961         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2962         u32 sw_idx = tp->tx_cons;
2963
2964         while (sw_idx != hw_idx) {
2965                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2966                 struct sk_buff *skb = ri->skb;
2967                 int i;
2968
2969                 BUG_ON(skb == NULL);
2970                 pci_unmap_single(tp->pdev,
2971                                  pci_unmap_addr(ri, mapping),
2972                                  skb_headlen(skb),
2973                                  PCI_DMA_TODEVICE);
2974
2975                 ri->skb = NULL;
2976
2977                 sw_idx = NEXT_TX(sw_idx);
2978
2979                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2980                         BUG_ON(sw_idx == hw_idx);
2981
2982                         ri = &tp->tx_buffers[sw_idx];
2983                         BUG_ON(ri->skb != NULL);
2984
2985                         pci_unmap_page(tp->pdev,
2986                                        pci_unmap_addr(ri, mapping),
2987                                        skb_shinfo(skb)->frags[i].size,
2988                                        PCI_DMA_TODEVICE);
2989
2990                         sw_idx = NEXT_TX(sw_idx);
2991                 }
2992
2993                 dev_kfree_skb(skb);
2994         }
2995
2996         tp->tx_cons = sw_idx;
2997
2998         if (unlikely(netif_queue_stopped(tp->dev))) {
2999                 spin_lock(&tp->tx_lock);
3000                 if (netif_queue_stopped(tp->dev) &&
3001                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3002                         netif_wake_queue(tp->dev);
3003                 spin_unlock(&tp->tx_lock);
3004         }
3005 }
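/* A note on the wakeup path just above: the unlocked netif_queue_stopped()
 * check avoids taking tx_lock in the common case, and the check is repeated
 * under tx_lock before waking so the decision is made consistently with
 * tg3_start_xmit(), which stops the queue while holding the same lock.
 * A minimal sketch of the pattern (hypothetical helper names):
 *
 *      if (unlikely(queue_stopped(dev))) {      // cheap, unlocked check
 *              spin_lock(&lock);
 *              if (queue_stopped(dev) && enough_free_descriptors())
 *                      wake_queue(dev);         // xmit path holds lock too
 *              spin_unlock(&lock);
 *      }
 */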
3006
3007 /* Returns size of skb allocated or < 0 on error.
3008  *
3009  * We only need to fill in the address because the other members
3010  * of the RX descriptor are invariant, see tg3_init_rings.
3011  *
3012  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3013  * posting buffers we only dirty the first cache line of the RX
3014  * descriptor (containing the address).  Whereas for the RX status
3015  * buffers the cpu only reads the last cacheline of the RX descriptor
3016  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3017  */
3018 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3019                             int src_idx, u32 dest_idx_unmasked)
3020 {
3021         struct tg3_rx_buffer_desc *desc;
3022         struct ring_info *map, *src_map;
3023         struct sk_buff *skb;
3024         dma_addr_t mapping;
3025         int skb_size, dest_idx;
3026
3027         src_map = NULL;
3028         switch (opaque_key) {
3029         case RXD_OPAQUE_RING_STD:
3030                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3031                 desc = &tp->rx_std[dest_idx];
3032                 map = &tp->rx_std_buffers[dest_idx];
3033                 if (src_idx >= 0)
3034                         src_map = &tp->rx_std_buffers[src_idx];
3035                 skb_size = tp->rx_pkt_buf_sz;
3036                 break;
3037
3038         case RXD_OPAQUE_RING_JUMBO:
3039                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3040                 desc = &tp->rx_jumbo[dest_idx];
3041                 map = &tp->rx_jumbo_buffers[dest_idx];
3042                 if (src_idx >= 0)
3043                         src_map = &tp->rx_jumbo_buffers[src_idx];
3044                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3045                 break;
3046
3047         default:
3048                 return -EINVAL;
3049         }
3050
3051         /* Do not overwrite any of the map or rp information
3052          * until we are sure we can commit to a new buffer.
3053          *
3054          * Callers depend upon this behavior and assume that
3055          * we leave everything unchanged if we fail.
3056          */
3057         skb = dev_alloc_skb(skb_size);
3058         if (skb == NULL)
3059                 return -ENOMEM;
3060
3061         skb->dev = tp->dev;
3062         skb_reserve(skb, tp->rx_offset);
3063
3064         mapping = pci_map_single(tp->pdev, skb->data,
3065                                  skb_size - tp->rx_offset,
3066                                  PCI_DMA_FROMDEVICE);
3067
3068         map->skb = skb;
3069         pci_unmap_addr_set(map, mapping, mapping);
3070
3071         if (src_map != NULL)
3072                 src_map->skb = NULL;
3073
3074         desc->addr_hi = ((u64)mapping >> 32);
3075         desc->addr_lo = ((u64)mapping & 0xffffffff);
3076
3077         return skb_size;
3078 }
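/* Worked example of the address split above: a 64-bit DMA mapping of, say,
 * 0x0000000123456800 is posted to the chip as addr_hi = 0x00000001 and
 * addr_lo = 0x23456800; the remaining descriptor fields were already written
 * once by tg3_init_rings() and never change.
 */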
3079
3080 /* We only need to move over in the address because the other
3081  * members of the RX descriptor are invariant.  See notes above
3082  * tg3_alloc_rx_skb for full details.
3083  */
3084 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3085                            int src_idx, u32 dest_idx_unmasked)
3086 {
3087         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3088         struct ring_info *src_map, *dest_map;
3089         int dest_idx;
3090
3091         switch (opaque_key) {
3092         case RXD_OPAQUE_RING_STD:
3093                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3094                 dest_desc = &tp->rx_std[dest_idx];
3095                 dest_map = &tp->rx_std_buffers[dest_idx];
3096                 src_desc = &tp->rx_std[src_idx];
3097                 src_map = &tp->rx_std_buffers[src_idx];
3098                 break;
3099
3100         case RXD_OPAQUE_RING_JUMBO:
3101                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3102                 dest_desc = &tp->rx_jumbo[dest_idx];
3103                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3104                 src_desc = &tp->rx_jumbo[src_idx];
3105                 src_map = &tp->rx_jumbo_buffers[src_idx];
3106                 break;
3107
3108         default:
3109                 return;
3110         }
3111
3112         dest_map->skb = src_map->skb;
3113         pci_unmap_addr_set(dest_map, mapping,
3114                            pci_unmap_addr(src_map, mapping));
3115         dest_desc->addr_hi = src_desc->addr_hi;
3116         dest_desc->addr_lo = src_desc->addr_lo;
3117
3118         src_map->skb = NULL;
3119 }
3120
3121 #if TG3_VLAN_TAG_USED
3122 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3123 {
3124         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3125 }
3126 #endif
3127
3128 /* The RX ring scheme is composed of multiple rings which post fresh
3129  * buffers to the chip, and one special ring the chip uses to report
3130  * status back to the host.
3131  *
3132  * The special ring reports the status of received packets to the
3133  * host.  The chip does not write into the original descriptor the
3134  * RX buffer was obtained from.  The chip simply takes the original
3135  * descriptor as provided by the host, updates the status and length
3136  * field, then writes this into the next status ring entry.
3137  *
3138  * Each ring the host uses to post buffers to the chip is described
3139  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3140  * it is first placed into the on-chip ram.  When the packet's length
3141  * is known, it walks down the TG3_BDINFO entries to select the ring.
3142  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3143  * which is within the range of the new packet's length is chosen.
3144  *
3145  * The "separate ring for rx status" scheme may sound queer, but it makes
3146  * sense from a cache coherency perspective.  If only the host writes
3147  * to the buffer post rings, and only the chip writes to the rx status
3148  * rings, then cache lines never move beyond shared-modified state.
3149  * If both the host and chip were to write into the same ring, cache line
3150  * eviction could occur since both entities want it in an exclusive state.
3151  */
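/* Rough sketch of the index flow described above, in terms of the fields
 * used by tg3_rx() below:
 *
 *   host posts buffers:   tp->rx_std_ptr / tp->rx_jumbo_ptr
 *                          -> MAILBOX_RCV_STD_PROD_IDX / MAILBOX_RCV_JUMBO_PROD_IDX
 *   chip reports status:  tp->hw_status->idx[0].rx_producer (status ring)
 *   host consumes status: tp->rx_rcb_ptr
 *                          -> acked via MAILBOX_RCVRET_CON_IDX_0
 */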
3152 static int tg3_rx(struct tg3 *tp, int budget)
3153 {
3154         u32 work_mask;
3155         u32 sw_idx = tp->rx_rcb_ptr;
3156         u16 hw_idx;
3157         int received;
3158
3159         hw_idx = tp->hw_status->idx[0].rx_producer;
3160         /*
3161          * We need to order the read of hw_idx and the read of
3162          * the opaque cookie.
3163          */
3164         rmb();
3165         work_mask = 0;
3166         received = 0;
3167         while (sw_idx != hw_idx && budget > 0) {
3168                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3169                 unsigned int len;
3170                 struct sk_buff *skb;
3171                 dma_addr_t dma_addr;
3172                 u32 opaque_key, desc_idx, *post_ptr;
3173
3174                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3175                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3176                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3177                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3178                                                   mapping);
3179                         skb = tp->rx_std_buffers[desc_idx].skb;
3180                         post_ptr = &tp->rx_std_ptr;
3181                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3182                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3183                                                   mapping);
3184                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3185                         post_ptr = &tp->rx_jumbo_ptr;
3186                 }
3187                 else {
3188                         goto next_pkt_nopost;
3189                 }
3190
3191                 work_mask |= opaque_key;
3192
3193                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3194                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3195                 drop_it:
3196                         tg3_recycle_rx(tp, opaque_key,
3197                                        desc_idx, *post_ptr);
3198                 drop_it_no_recycle:
3199                         /* Other statistics kept track of by card. */
3200                         tp->net_stats.rx_dropped++;
3201                         goto next_pkt;
3202                 }
3203
3204                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3205
3206                 /* rx_offset != 2 iff this is a 5701 card running
3207                  * in PCI-X mode [see tg3_get_invariants()]
3208                  */
3209                 if (len > RX_COPY_THRESHOLD &&
3210                     tp->rx_offset == 2) {
3211                         int skb_size;
3212
3213                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3214                                                     desc_idx, *post_ptr);
3215                         if (skb_size < 0)
3216                                 goto drop_it;
3217
3218                         pci_unmap_single(tp->pdev, dma_addr,
3219                                          skb_size - tp->rx_offset,
3220                                          PCI_DMA_FROMDEVICE);
3221
3222                         skb_put(skb, len);
3223                 } else {
3224                         struct sk_buff *copy_skb;
3225
3226                         tg3_recycle_rx(tp, opaque_key,
3227                                        desc_idx, *post_ptr);
3228
3229                         copy_skb = dev_alloc_skb(len + 2);
3230                         if (copy_skb == NULL)
3231                                 goto drop_it_no_recycle;
3232
3233                         copy_skb->dev = tp->dev;
3234                         skb_reserve(copy_skb, 2);
3235                         skb_put(copy_skb, len);
3236                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3237                         memcpy(copy_skb->data, skb->data, len);
3238                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3239
3240                         /* We'll reuse the original ring buffer. */
3241                         skb = copy_skb;
3242                 }
3243
3244                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3245                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3246                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3247                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3248                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3249                 else
3250                         skb->ip_summed = CHECKSUM_NONE;
3251
3252                 skb->protocol = eth_type_trans(skb, tp->dev);
3253 #if TG3_VLAN_TAG_USED
3254                 if (tp->vlgrp != NULL &&
3255                     desc->type_flags & RXD_FLAG_VLAN) {
3256                         tg3_vlan_rx(tp, skb,
3257                                     desc->err_vlan & RXD_VLAN_MASK);
3258                 } else
3259 #endif
3260                         netif_receive_skb(skb);
3261
3262                 tp->dev->last_rx = jiffies;
3263                 received++;
3264                 budget--;
3265
3266 next_pkt:
3267                 (*post_ptr)++;
3268 next_pkt_nopost:
3269                 sw_idx++;
3270                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3271
3272                 /* Refresh hw_idx to see if there is new work */
3273                 if (sw_idx == hw_idx) {
3274                         hw_idx = tp->hw_status->idx[0].rx_producer;
3275                         rmb();
3276                 }
3277         }
3278
3279         /* ACK the status ring. */
3280         tp->rx_rcb_ptr = sw_idx;
3281         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3282
3283         /* Refill RX ring(s). */
3284         if (work_mask & RXD_OPAQUE_RING_STD) {
3285                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3286                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3287                              sw_idx);
3288         }
3289         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3290                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3291                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3292                              sw_idx);
3293         }
3294         mmiowb();
3295
3296         return received;
3297 }
3298
3299 static int tg3_poll(struct net_device *netdev, int *budget)
3300 {
3301         struct tg3 *tp = netdev_priv(netdev);
3302         struct tg3_hw_status *sblk = tp->hw_status;
3303         int done;
3304
3305         /* handle link change and other phy events */
3306         if (!(tp->tg3_flags &
3307               (TG3_FLAG_USE_LINKCHG_REG |
3308                TG3_FLAG_POLL_SERDES))) {
3309                 if (sblk->status & SD_STATUS_LINK_CHG) {
3310                         sblk->status = SD_STATUS_UPDATED |
3311                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3312                         spin_lock(&tp->lock);
3313                         tg3_setup_phy(tp, 0);
3314                         spin_unlock(&tp->lock);
3315                 }
3316         }
3317
3318         /* run TX completion thread */
3319         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3320                 tg3_tx(tp);
3321         }
3322
3323         /* run RX thread, within the bounds set by NAPI.
3324          * All RX "locking" is done by ensuring outside
3325          * code synchronizes with dev->poll()
3326          */
3327         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3328                 int orig_budget = *budget;
3329                 int work_done;
3330
3331                 if (orig_budget > netdev->quota)
3332                         orig_budget = netdev->quota;
3333
3334                 work_done = tg3_rx(tp, orig_budget);
3335
3336                 *budget -= work_done;
3337                 netdev->quota -= work_done;
3338         }
3339
3340         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3341                 tp->last_tag = sblk->status_tag;
3342                 rmb();
3343         } else
3344                 sblk->status &= ~SD_STATUS_UPDATED;
3345
3346         /* if no more work, tell net stack and NIC we're done */
3347         done = !tg3_has_work(tp);
3348         if (done) {
3349                 netif_rx_complete(netdev);
3350                 tg3_restart_ints(tp);
3351         }
3352
3353         return (done ? 0 : 1);
3354 }
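/* tg3_poll() follows the dev->poll() contract of this kernel generation:
 * it consumes at most *budget (and netdev->quota) receive packets,
 * decrements both by the work actually done, and returns 0 only after
 * calling netif_rx_complete() and tg3_restart_ints(); returning 1 asks
 * the net core to schedule the poll again.
 */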
3355
3356 static void tg3_irq_quiesce(struct tg3 *tp)
3357 {
3358         BUG_ON(tp->irq_sync);
3359
3360         tp->irq_sync = 1;
3361         smp_mb();
3362
3363         synchronize_irq(tp->pdev->irq);
3364 }
3365
3366 static inline int tg3_irq_sync(struct tg3 *tp)
3367 {
3368         return tp->irq_sync;
3369 }
3370
3371 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3372  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3373  * with as well.  Most of the time, this is not necessary except when
3374  * shutting down the device.
3375  */
3376 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3377 {
3378         if (irq_sync)
3379                 tg3_irq_quiesce(tp);
3380         spin_lock_bh(&tp->lock);
3381         spin_lock(&tp->tx_lock);
3382 }
3383
3384 static inline void tg3_full_unlock(struct tg3 *tp)
3385 {
3386         spin_unlock(&tp->tx_lock);
3387         spin_unlock_bh(&tp->lock);
3388 }
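/* Typical usage, a minimal sketch (the MTU-change path further below does
 * essentially this): quiesce the data path, take both locks while also
 * waiting for any in-flight IRQ handler, then reprogram the chip before
 * releasing everything again:
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *      tg3_init_hw(tp);
 *      tg3_netif_start(tp);
 *      tg3_full_unlock(tp);
 */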
3389
3390 /* One-shot MSI handler - Chip automatically disables interrupt
3391  * after sending MSI so driver doesn't have to do it.
3392  */
3393 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3394 {
3395         struct net_device *dev = dev_id;
3396         struct tg3 *tp = netdev_priv(dev);
3397
3398         prefetch(tp->hw_status);
3399         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3400
3401         if (likely(!tg3_irq_sync(tp)))
3402                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3403
3404         return IRQ_HANDLED;
3405 }
3406
3407 /* MSI ISR - No need to check for interrupt sharing and no need to
3408  * flush status block and interrupt mailbox. PCI ordering rules
3409  * guarantee that MSI will arrive after the status block.
3410  */
3411 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3412 {
3413         struct net_device *dev = dev_id;
3414         struct tg3 *tp = netdev_priv(dev);
3415
3416         prefetch(tp->hw_status);
3417         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3418         /*
3419          * Writing any value to intr-mbox-0 clears PCI INTA# and
3420          * chip-internal interrupt pending events.
3421          * Writing non-zero to intr-mbox-0 additionally tells the
3422          * NIC to stop sending us irqs, engaging "in-intr-handler"
3423          * event coalescing.
3424          */
3425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3426         if (likely(!tg3_irq_sync(tp)))
3427                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3428
3429         return IRQ_RETVAL(1);
3430 }
3431
3432 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3433 {
3434         struct net_device *dev = dev_id;
3435         struct tg3 *tp = netdev_priv(dev);
3436         struct tg3_hw_status *sblk = tp->hw_status;
3437         unsigned int handled = 1;
3438
3439         /* In INTx mode, it is possible for the interrupt to arrive at
3440          * the CPU before the status block posted prior to the interrupt.
3441          * Reading the PCI State register will confirm whether the
3442          * interrupt is ours and will flush the status block.
3443          */
3444         if ((sblk->status & SD_STATUS_UPDATED) ||
3445             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3446                 /*
3447                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3448                  * chip-internal interrupt pending events.
3449                  * Writing non-zero to intr-mbox-0 additionally tells the
3450                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3451                  * event coalescing.
3452                  */
3453                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3454                              0x00000001);
3455                 if (tg3_irq_sync(tp))
3456                         goto out;
3457                 sblk->status &= ~SD_STATUS_UPDATED;
3458                 if (likely(tg3_has_work(tp))) {
3459                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3460                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3461                 } else {
3462                         /* No work, shared interrupt perhaps?  re-enable
3463                          * interrupts, and flush that PCI write
3464                          */
3465                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3466                                 0x00000000);
3467                 }
3468         } else {        /* shared interrupt */
3469                 handled = 0;
3470         }
3471 out:
3472         return IRQ_RETVAL(handled);
3473 }
3474
3475 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3476 {
3477         struct net_device *dev = dev_id;
3478         struct tg3 *tp = netdev_priv(dev);
3479         struct tg3_hw_status *sblk = tp->hw_status;
3480         unsigned int handled = 1;
3481
3482         /* In INTx mode, it is possible for the interrupt to arrive at
3483          * the CPU before the status block posted prior to the interrupt.
3484          * Reading the PCI State register will confirm whether the
3485          * interrupt is ours and will flush the status block.
3486          */
3487         if ((sblk->status_tag != tp->last_tag) ||
3488             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3489                 /*
3490                  * writing any value to intr-mbox-0 clears PCI INTA# and
3491                  * chip-internal interrupt pending events.
3492                  * writing non-zero to intr-mbox-0 additionally tells the
3493                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3494                  * event coalescing.
3495                  */
3496                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3497                              0x00000001);
3498                 if (tg3_irq_sync(tp))
3499                         goto out;
3500                 if (netif_rx_schedule_prep(dev)) {
3501                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3502                         /* Update last_tag to mark that this status has been
3503                          * seen. Because interrupt may be shared, we may be
3504                          * racing with tg3_poll(), so only update last_tag
3505                          * if tg3_poll() is not scheduled.
3506                          */
3507                         tp->last_tag = sblk->status_tag;
3508                         __netif_rx_schedule(dev);
3509                 }
3510         } else {        /* shared interrupt */
3511                 handled = 0;
3512         }
3513 out:
3514         return IRQ_RETVAL(handled);
3515 }
3516
3517 /* ISR for interrupt test */
3518 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3519                 struct pt_regs *regs)
3520 {
3521         struct net_device *dev = dev_id;
3522         struct tg3 *tp = netdev_priv(dev);
3523         struct tg3_hw_status *sblk = tp->hw_status;
3524
3525         if ((sblk->status & SD_STATUS_UPDATED) ||
3526             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3527                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3528                              0x00000001);
3529                 return IRQ_RETVAL(1);
3530         }
3531         return IRQ_RETVAL(0);
3532 }
3533
3534 static int tg3_init_hw(struct tg3 *);
3535 static int tg3_halt(struct tg3 *, int, int);
3536
3537 #ifdef CONFIG_NET_POLL_CONTROLLER
3538 static void tg3_poll_controller(struct net_device *dev)
3539 {
3540         struct tg3 *tp = netdev_priv(dev);
3541
3542         tg3_interrupt(tp->pdev->irq, dev, NULL);
3543 }
3544 #endif
3545
3546 static void tg3_reset_task(void *_data)
3547 {
3548         struct tg3 *tp = _data;
3549         unsigned int restart_timer;
3550
3551         tg3_full_lock(tp, 0);
3552         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3553
3554         if (!netif_running(tp->dev)) {
3555                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3556                 tg3_full_unlock(tp);
3557                 return;
3558         }
3559
3560         tg3_full_unlock(tp);
3561
3562         tg3_netif_stop(tp);
3563
3564         tg3_full_lock(tp, 1);
3565
3566         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3567         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3568
3569         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3570         tg3_init_hw(tp);
3571
3572         tg3_netif_start(tp);
3573
3574         if (restart_timer)
3575                 mod_timer(&tp->timer, jiffies + 1);
3576
3577         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3578
3579         tg3_full_unlock(tp);
3580 }
3581
3582 static void tg3_tx_timeout(struct net_device *dev)
3583 {
3584         struct tg3 *tp = netdev_priv(dev);
3585
3586         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3587                dev->name);
3588
3589         schedule_work(&tp->reset_task);
3590 }
3591
3592 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3593 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3594 {
3595         u32 base = (u32) mapping & 0xffffffff;
3596
3597         return ((base > 0xffffdcc0) &&
3598                 (base + len + 8 < base));
3599 }
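/* Example: with base = 0xffffe000 and len = 0x3000, the u32 sum
 * base + len + 8 wraps to 0x00001008, which is smaller than base, and base
 * is above 0xffffdcc0, so the test returns true -- the buffer would straddle
 * a 4GB boundary and must take the workaround path below.
 */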
3600
3601 /* Test for DMA addresses > 40-bit */
3602 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3603                                           int len)
3604 {
3605 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3606         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3607                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3608         return 0;
3609 #else
3610         return 0;
3611 #endif
3612 }
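/* Example (assuming DMA_40BIT_MASK == 0xffffffffffULL): a mapping of
 * 0xfffffff000 with len = 0x2000 ends at 0x10000001000, which exceeds the
 * 40-bit limit, so the test returns true when the device has the
 * TG3_FLAG_40BIT_DMA_BUG quirk and the 64-bit highmem config applies.
 */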
3613
3614 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3615
3616 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3617 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3618                                        u32 last_plus_one, u32 *start,
3619                                        u32 base_flags, u32 mss)
3620 {
3621         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3622         dma_addr_t new_addr = 0;
3623         u32 entry = *start;
3624         int i, ret = 0;
3625
3626         if (!new_skb) {
3627                 ret = -1;
3628         } else {
3629                 /* New SKB is guaranteed to be linear. */
3630                 entry = *start;
3631                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3632                                           PCI_DMA_TODEVICE);
3633                 /* Make sure new skb does not cross any 4G boundaries.
3634                  * Drop the packet if it does.
3635                  */
3636                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3637                         ret = -1;
3638                         dev_kfree_skb(new_skb);
3639                         new_skb = NULL;
3640                 } else {
3641                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3642                                     base_flags, 1 | (mss << 1));
3643                         *start = NEXT_TX(entry);
3644                 }
3645         }
3646
3647         /* Now clean up the sw ring entries. */
3648         i = 0;
3649         while (entry != last_plus_one) {
3650                 int len;
3651
3652                 if (i == 0)
3653                         len = skb_headlen(skb);
3654                 else
3655                         len = skb_shinfo(skb)->frags[i-1].size;
3656                 pci_unmap_single(tp->pdev,
3657                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3658                                  len, PCI_DMA_TODEVICE);
3659                 if (i == 0) {
3660                         tp->tx_buffers[entry].skb = new_skb;
3661                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3662                 } else {
3663                         tp->tx_buffers[entry].skb = NULL;
3664                 }
3665                 entry = NEXT_TX(entry);
3666                 i++;
3667         }
3668
3669         dev_kfree_skb(skb);
3670
3671         return ret;
3672 }
3673
3674 static void tg3_set_txd(struct tg3 *tp, int entry,
3675                         dma_addr_t mapping, int len, u32 flags,
3676                         u32 mss_and_is_end)
3677 {
3678         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3679         int is_end = (mss_and_is_end & 0x1);
3680         u32 mss = (mss_and_is_end >> 1);
3681         u32 vlan_tag = 0;
3682
3683         if (is_end)
3684                 flags |= TXD_FLAG_END;
3685         if (flags & TXD_FLAG_VLAN) {
3686                 vlan_tag = flags >> 16;
3687                 flags &= 0xffff;
3688         }
3689         vlan_tag |= (mss << TXD_MSS_SHIFT);
3690
3691         txd->addr_hi = ((u64) mapping >> 32);
3692         txd->addr_lo = ((u64) mapping & 0xffffffff);
3693         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3694         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3695 }
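/* Example of the mss_and_is_end encoding used by the callers below: if a
 * caller passes (1 | (1460 << 1)), then is_end decodes to 1 (setting
 * TXD_FLAG_END) and mss decodes back to 1460, which is packed alongside any
 * VLAN tag into the descriptor's vlan_tag/mss word.  Note the TSO transmit
 * paths also fold header-size information into the mss value before calling
 * here (see the "mss |= ... << 9" and "tsflags << 11" cases below).
 */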
3696
3697 /* hard_start_xmit for devices that don't have any bugs and
3698  * support TG3_FLG2_HW_TSO_2 only.
3699  */
3700 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3701 {
3702         struct tg3 *tp = netdev_priv(dev);
3703         dma_addr_t mapping;
3704         u32 len, entry, base_flags, mss;
3705
3706         len = skb_headlen(skb);
3707
3708         /* No BH disabling for tx_lock here.  We are running in BH disabled
3709          * context and TX reclaim runs via tp->poll inside of a software
3710          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3711          * no IRQ context deadlocks to worry about either.  Rejoice!
3712          */
3713         if (!spin_trylock(&tp->tx_lock))
3714                 return NETDEV_TX_LOCKED;
3715
3716         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3717                 if (!netif_queue_stopped(dev)) {
3718                         netif_stop_queue(dev);
3719
3720                         /* This is a hard error, log it. */
3721                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3722                                "queue awake!\n", dev->name);
3723                 }
3724                 spin_unlock(&tp->tx_lock);
3725                 return NETDEV_TX_BUSY;
3726         }
3727
3728         entry = tp->tx_prod;
3729         base_flags = 0;
3730 #if TG3_TSO_SUPPORT != 0
3731         mss = 0;
3732         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3733             (mss = skb_shinfo(skb)->tso_size) != 0) {
3734                 int tcp_opt_len, ip_tcp_len;
3735
3736                 if (skb_header_cloned(skb) &&
3737                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3738                         dev_kfree_skb(skb);
3739                         goto out_unlock;
3740                 }
3741
3742                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3743                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3744
3745                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3746                                TXD_FLAG_CPU_POST_DMA);
3747
3748                 skb->nh.iph->check = 0;
3749                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3750
3751                 skb->h.th->check = 0;
3752
3753                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3754         }
3755         else if (skb->ip_summed == CHECKSUM_HW)
3756                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3757 #else
3758         mss = 0;
3759         if (skb->ip_summed == CHECKSUM_HW)
3760                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3761 #endif
3762 #if TG3_VLAN_TAG_USED
3763         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3764                 base_flags |= (TXD_FLAG_VLAN |
3765                                (vlan_tx_tag_get(skb) << 16));
3766 #endif
3767
3768         /* Queue skb data, a.k.a. the main skb fragment. */
3769         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3770
3771         tp->tx_buffers[entry].skb = skb;
3772         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3773
3774         tg3_set_txd(tp, entry, mapping, len, base_flags,
3775                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3776
3777         entry = NEXT_TX(entry);
3778
3779         /* Now loop through additional data fragments, and queue them. */
3780         if (skb_shinfo(skb)->nr_frags > 0) {
3781                 unsigned int i, last;
3782
3783                 last = skb_shinfo(skb)->nr_frags - 1;
3784                 for (i = 0; i <= last; i++) {
3785                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3786
3787                         len = frag->size;
3788                         mapping = pci_map_page(tp->pdev,
3789                                                frag->page,
3790                                                frag->page_offset,
3791                                                len, PCI_DMA_TODEVICE);
3792
3793                         tp->tx_buffers[entry].skb = NULL;
3794                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3795
3796                         tg3_set_txd(tp, entry, mapping, len,
3797                                     base_flags, (i == last) | (mss << 1));
3798
3799                         entry = NEXT_TX(entry);
3800                 }
3801         }
3802
3803         /* Packets are ready, update Tx producer idx local and on card. */
3804         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3805
3806         tp->tx_prod = entry;
3807         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3808                 netif_stop_queue(dev);
3809                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3810                         netif_wake_queue(tp->dev);
3811         }
3812
3813 out_unlock:
3814         mmiowb();
3815         spin_unlock(&tp->tx_lock);
3816
3817         dev->trans_start = jiffies;
3818
3819         return NETDEV_TX_OK;
3820 }
3821
3822 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3823  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3824  */
3825 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3826 {
3827         struct tg3 *tp = netdev_priv(dev);
3828         dma_addr_t mapping;
3829         u32 len, entry, base_flags, mss;
3830         int would_hit_hwbug;
3831
3832         len = skb_headlen(skb);
3833
3834         /* No BH disabling for tx_lock here.  We are running in BH disabled
3835          * context and TX reclaim runs via tp->poll inside of a software
3836          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3837          * no IRQ context deadlocks to worry about either.  Rejoice!
3838          */
3839         if (!spin_trylock(&tp->tx_lock))
3840                 return NETDEV_TX_LOCKED; 
3841
3842         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3843                 if (!netif_queue_stopped(dev)) {
3844                         netif_stop_queue(dev);
3845
3846                         /* This is a hard error, log it. */
3847                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3848                                "queue awake!\n", dev->name);
3849                 }
3850                 spin_unlock(&tp->tx_lock);
3851                 return NETDEV_TX_BUSY;
3852         }
3853
3854         entry = tp->tx_prod;
3855         base_flags = 0;
3856         if (skb->ip_summed == CHECKSUM_HW)
3857                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3858 #if TG3_TSO_SUPPORT != 0
3859         mss = 0;
3860         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3861             (mss = skb_shinfo(skb)->tso_size) != 0) {
3862                 int tcp_opt_len, ip_tcp_len;
3863
3864                 if (skb_header_cloned(skb) &&
3865                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3866                         dev_kfree_skb(skb);
3867                         goto out_unlock;
3868                 }
3869
3870                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3871                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3872
3873                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3874                                TXD_FLAG_CPU_POST_DMA);
3875
3876                 skb->nh.iph->check = 0;
3877                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3878                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3879                         skb->h.th->check = 0;
3880                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3881                 }
3882                 else {
3883                         skb->h.th->check =
3884                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3885                                                    skb->nh.iph->daddr,
3886                                                    0, IPPROTO_TCP, 0);
3887                 }
3888
3889                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3890                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3891                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3892                                 int tsflags;
3893
3894                                 tsflags = ((skb->nh.iph->ihl - 5) +
3895                                            (tcp_opt_len >> 2));
3896                                 mss |= (tsflags << 11);
3897                         }
3898                 } else {
3899                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3900                                 int tsflags;
3901
3902                                 tsflags = ((skb->nh.iph->ihl - 5) +
3903                                            (tcp_opt_len >> 2));
3904                                 base_flags |= tsflags << 12;
3905                         }
3906                 }
3907         }
3908 #else
3909         mss = 0;
3910 #endif
3911 #if TG3_VLAN_TAG_USED
3912         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3913                 base_flags |= (TXD_FLAG_VLAN |
3914                                (vlan_tx_tag_get(skb) << 16));
3915 #endif
3916
3917         /* Queue skb data, a.k.a. the main skb fragment. */
3918         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3919
3920         tp->tx_buffers[entry].skb = skb;
3921         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3922
3923         would_hit_hwbug = 0;
3924
3925         if (tg3_4g_overflow_test(mapping, len))
3926                 would_hit_hwbug = 1;
3927
3928         tg3_set_txd(tp, entry, mapping, len, base_flags,
3929                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3930
3931         entry = NEXT_TX(entry);
3932
3933         /* Now loop through additional data fragments, and queue them. */
3934         if (skb_shinfo(skb)->nr_frags > 0) {
3935                 unsigned int i, last;
3936
3937                 last = skb_shinfo(skb)->nr_frags - 1;
3938                 for (i = 0; i <= last; i++) {
3939                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3940
3941                         len = frag->size;
3942                         mapping = pci_map_page(tp->pdev,
3943                                                frag->page,
3944                                                frag->page_offset,
3945                                                len, PCI_DMA_TODEVICE);
3946
3947                         tp->tx_buffers[entry].skb = NULL;
3948                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3949
3950                         if (tg3_4g_overflow_test(mapping, len))
3951                                 would_hit_hwbug = 1;
3952
3953                         if (tg3_40bit_overflow_test(tp, mapping, len))
3954                                 would_hit_hwbug = 1;
3955
3956                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3957                                 tg3_set_txd(tp, entry, mapping, len,
3958                                             base_flags, (i == last)|(mss << 1));
3959                         else
3960                                 tg3_set_txd(tp, entry, mapping, len,
3961                                             base_flags, (i == last));
3962
3963                         entry = NEXT_TX(entry);
3964                 }
3965         }
3966
3967         if (would_hit_hwbug) {
3968                 u32 last_plus_one = entry;
3969                 u32 start;
3970
3971                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3972                 start &= (TG3_TX_RING_SIZE - 1);
3973
3974                 /* If the workaround fails due to memory/mapping
3975                  * failure, silently drop this packet.
3976                  */
3977                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3978                                                 &start, base_flags, mss))
3979                         goto out_unlock;
3980
3981                 entry = start;
3982         }
3983
3984         /* Packets are ready, update Tx producer idx local and on card. */
3985         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3986
3987         tp->tx_prod = entry;
3988         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3989                 netif_stop_queue(dev);
3990                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3991                         netif_wake_queue(tp->dev);
3992         }
3993
3994 out_unlock:
3995         mmiowb();
3996         spin_unlock(&tp->tx_lock);
3997
3998         dev->trans_start = jiffies;
3999
4000         return NETDEV_TX_OK;
4001 }
4002
4003 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4004                                int new_mtu)
4005 {
4006         dev->mtu = new_mtu;
4007
4008         if (new_mtu > ETH_DATA_LEN) {
4009                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4010                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4011                         ethtool_op_set_tso(dev, 0);
4012                 }
4013                 else
4014                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4015         } else {
4016                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4017                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4018                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4019         }
4020 }
4021
4022 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4023 {
4024         struct tg3 *tp = netdev_priv(dev);
4025
4026         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4027                 return -EINVAL;
4028
4029         if (!netif_running(dev)) {
4030                 /* We'll just catch it later when the
4031                  * device is brought up.
4032                  */
4033                 tg3_set_mtu(dev, tp, new_mtu);
4034                 return 0;
4035         }
4036
4037         tg3_netif_stop(tp);
4038
4039         tg3_full_lock(tp, 1);
4040
4041         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4042
4043         tg3_set_mtu(dev, tp, new_mtu);
4044
4045         tg3_init_hw(tp);
4046
4047         tg3_netif_start(tp);
4048
4049         tg3_full_unlock(tp);
4050
4051         return 0;
4052 }
4053
4054 /* Free up pending packets in all rx/tx rings.
4055  *
4056  * The chip has been shut down and the driver detached from
4057  * the networking, so no interrupts or new tx packets will
4058  * end up in the driver.  tp->{tx,}lock is not held and we are not
4059  * in an interrupt context and thus may sleep.
4060  */
4061 static void tg3_free_rings(struct tg3 *tp)
4062 {
4063         struct ring_info *rxp;
4064         int i;
4065
4066         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4067                 rxp = &tp->rx_std_buffers[i];
4068
4069                 if (rxp->skb == NULL)
4070                         continue;
4071                 pci_unmap_single(tp->pdev,
4072                                  pci_unmap_addr(rxp, mapping),
4073                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4074                                  PCI_DMA_FROMDEVICE);
4075                 dev_kfree_skb_any(rxp->skb);
4076                 rxp->skb = NULL;
4077         }
4078
4079         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4080                 rxp = &tp->rx_jumbo_buffers[i];
4081
4082                 if (rxp->skb == NULL)
4083                         continue;
4084                 pci_unmap_single(tp->pdev,
4085                                  pci_unmap_addr(rxp, mapping),
4086                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4087                                  PCI_DMA_FROMDEVICE);
4088                 dev_kfree_skb_any(rxp->skb);
4089                 rxp->skb = NULL;
4090         }
4091
4092         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4093                 struct tx_ring_info *txp;
4094                 struct sk_buff *skb;
4095                 int j;
4096
4097                 txp = &tp->tx_buffers[i];
4098                 skb = txp->skb;
4099
4100                 if (skb == NULL) {
4101                         i++;
4102                         continue;
4103                 }
4104
4105                 pci_unmap_single(tp->pdev,
4106                                  pci_unmap_addr(txp, mapping),
4107                                  skb_headlen(skb),
4108                                  PCI_DMA_TODEVICE);
4109                 txp->skb = NULL;
4110
4111                 i++;
4112
4113                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4114                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4115                         pci_unmap_page(tp->pdev,
4116                                        pci_unmap_addr(txp, mapping),
4117                                        skb_shinfo(skb)->frags[j].size,
4118                                        PCI_DMA_TODEVICE);
4119                         i++;
4120                 }
4121
4122                 dev_kfree_skb_any(skb);
4123         }
4124 }
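
A note on the tx cleanup loop above: the first ring entry holds the mapping for the skb's linear data and each following entry holds one page fragment, so the loop bumps i once per fragment and wraps it with i & (TG3_TX_RING_SIZE - 1), which only works because the ring size is a power of two. A minimal standalone sketch of that masking idiom (RING_SIZE and ring_next() are illustrative names, not part of the driver):

    #include <stdio.h>

    #define RING_SIZE 512   /* must be a power of two for the mask trick */

    /* Advance a ring index with wrap-around using a mask instead of '%'. */
    static unsigned int ring_next(unsigned int i)
    {
            return (i + 1) & (RING_SIZE - 1);
    }

    int main(void)
    {
            unsigned int i = RING_SIZE - 2;
            int n;

            /* Walk three entries across the wrap point: 510, 511, 0. */
            for (n = 0; n < 3; n++) {
                    printf("entry %u\n", i);
                    i = ring_next(i);
            }
            return 0;
    }
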
4125
4126 /* Initialize tx/rx rings for packet processing.
4127  *
4128  * The chip has been shut down and the driver detached from
4129  * the networking, so no interrupts or new tx packets will
4130  * end up in the driver.  tp->{tx,}lock are held and thus
4131  * we may not sleep.
4132  */
4133 static void tg3_init_rings(struct tg3 *tp)
4134 {
4135         u32 i;
4136
4137         /* Free up all the SKBs. */
4138         tg3_free_rings(tp);
4139
4140         /* Zero out all descriptors. */
4141         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4142         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4143         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4144         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4145
4146         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4147         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4148             (tp->dev->mtu > ETH_DATA_LEN))
4149                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4150
4151         /* Initialize invariants of the rings; we only set this
4152          * stuff once.  This works because the card does not
4153          * write into the rx buffer posting rings.
4154          */
4155         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4156                 struct tg3_rx_buffer_desc *rxd;
4157
4158                 rxd = &tp->rx_std[i];
4159                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4160                         << RXD_LEN_SHIFT;
4161                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4162                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4163                                (i << RXD_OPAQUE_INDEX_SHIFT));
4164         }
4165
4166         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4167                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4168                         struct tg3_rx_buffer_desc *rxd;
4169
4170                         rxd = &tp->rx_jumbo[i];
4171                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4172                                 << RXD_LEN_SHIFT;
4173                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4174                                 RXD_FLAG_JUMBO;
4175                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4176                                (i << RXD_OPAQUE_INDEX_SHIFT));
4177                 }
4178         }
4179
4180         /* Now allocate fresh SKBs for each rx ring. */
4181         for (i = 0; i < tp->rx_pending; i++) {
4182                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4183                                      -1, i) < 0)
4184                         break;
4185         }
4186
4187         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4188                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4189                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4190                                              -1, i) < 0)
4191                                 break;
4192                 }
4193         }
4194 }
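
The opaque descriptor field initialized above packs the ring identity together with the buffer index, so the completion path can tell which posting ring (standard or jumbo) and which slot a returned descriptor belongs to. The sketch below shows the general pack/decode idea with made-up field positions; the real RXD_OPAQUE_* layout is defined in tg3.h and may differ:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field layout only -- not the real RXD_OPAQUE_* values. */
    #define OPAQUE_RING_STD    (1u << 24)
    #define OPAQUE_RING_JUMBO  (2u << 24)
    #define OPAQUE_INDEX_MASK  0x00ffffffu

    static uint32_t pack_opaque(uint32_t ring, uint32_t index)
    {
            return ring | (index & OPAQUE_INDEX_MASK);
    }

    int main(void)
    {
            uint32_t op = pack_opaque(OPAQUE_RING_STD, 37);

            printf("ring std? %d  index %u\n",
                   (op & ~OPAQUE_INDEX_MASK) == OPAQUE_RING_STD,
                   (unsigned)(op & OPAQUE_INDEX_MASK));
            return 0;
    }
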
4195
4196 /*
4197  * Must not be invoked with interrupt sources disabled and
4198  * the hardware shut down.
4199  */
4200 static void tg3_free_consistent(struct tg3 *tp)
4201 {
4202         kfree(tp->rx_std_buffers);
4203         tp->rx_std_buffers = NULL;
4204         if (tp->rx_std) {
4205                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4206                                     tp->rx_std, tp->rx_std_mapping);
4207                 tp->rx_std = NULL;
4208         }
4209         if (tp->rx_jumbo) {
4210                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4211                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4212                 tp->rx_jumbo = NULL;
4213         }
4214         if (tp->rx_rcb) {
4215                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4216                                     tp->rx_rcb, tp->rx_rcb_mapping);
4217                 tp->rx_rcb = NULL;
4218         }
4219         if (tp->tx_ring) {
4220                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4221                         tp->tx_ring, tp->tx_desc_mapping);
4222                 tp->tx_ring = NULL;
4223         }
4224         if (tp->hw_status) {
4225                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4226                                     tp->hw_status, tp->status_mapping);
4227                 tp->hw_status = NULL;
4228         }
4229         if (tp->hw_stats) {
4230                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4231                                     tp->hw_stats, tp->stats_mapping);
4232                 tp->hw_stats = NULL;
4233         }
4234 }
4235
4236 /*
4237  * Must not be invoked with interrupt sources disabled and
4238  * the hardware shut down.  Can sleep.
4239  */
4240 static int tg3_alloc_consistent(struct tg3 *tp)
4241 {
4242         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4243                                       (TG3_RX_RING_SIZE +
4244                                        TG3_RX_JUMBO_RING_SIZE)) +
4245                                      (sizeof(struct tx_ring_info) *
4246                                       TG3_TX_RING_SIZE),
4247                                      GFP_KERNEL);
4248         if (!tp->rx_std_buffers)
4249                 return -ENOMEM;
4250
4251         memset(tp->rx_std_buffers, 0,
4252                (sizeof(struct ring_info) *
4253                 (TG3_RX_RING_SIZE +
4254                  TG3_RX_JUMBO_RING_SIZE)) +
4255                (sizeof(struct tx_ring_info) *
4256                 TG3_TX_RING_SIZE));
4257
4258         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4259         tp->tx_buffers = (struct tx_ring_info *)
4260                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4261
4262         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4263                                           &tp->rx_std_mapping);
4264         if (!tp->rx_std)
4265                 goto err_out;
4266
4267         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4268                                             &tp->rx_jumbo_mapping);
4269
4270         if (!tp->rx_jumbo)
4271                 goto err_out;
4272
4273         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4274                                           &tp->rx_rcb_mapping);
4275         if (!tp->rx_rcb)
4276                 goto err_out;
4277
4278         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4279                                            &tp->tx_desc_mapping);
4280         if (!tp->tx_ring)
4281                 goto err_out;
4282
4283         tp->hw_status = pci_alloc_consistent(tp->pdev,
4284                                              TG3_HW_STATUS_SIZE,
4285                                              &tp->status_mapping);
4286         if (!tp->hw_status)
4287                 goto err_out;
4288
4289         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4290                                             sizeof(struct tg3_hw_stats),
4291                                             &tp->stats_mapping);
4292         if (!tp->hw_stats)
4293                 goto err_out;
4294
4295         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4296         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4297
4298         return 0;
4299
4300 err_out:
4301         tg3_free_consistent(tp);
4302         return -ENOMEM;
4303 }
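
tg3_alloc_consistent() makes one kmalloc() for all of the software bookkeeping and then carves it into three adjacent arrays: standard rx ring_info, jumbo rx ring_info, and tx_ring_info. The standalone sketch below reproduces that carve-up pattern with illustrative struct names and a plain calloc() standing in for kmalloc() plus memset():

    #include <stdlib.h>
    #include <stdio.h>

    struct rx_info { void *skb; unsigned long mapping; };
    struct tx_info { void *skb; unsigned long mapping; };

    #define N_RX_STD   512
    #define N_RX_JUMBO 256
    #define N_TX       512

    int main(void)
    {
            size_t sz = sizeof(struct rx_info) * (N_RX_STD + N_RX_JUMBO) +
                        sizeof(struct tx_info) * N_TX;
            struct rx_info *rx_std, *rx_jumbo;
            struct tx_info *tx;

            /* One allocation, zeroed, then carved into three adjacent arrays. */
            rx_std = calloc(1, sz);
            if (!rx_std)
                    return 1;
            rx_jumbo = &rx_std[N_RX_STD];
            tx = (struct tx_info *)&rx_jumbo[N_RX_JUMBO];

            printf("rx_std=%p rx_jumbo=%p tx=%p\n",
                   (void *)rx_std, (void *)rx_jumbo, (void *)tx);
            free(rx_std);
            return 0;
    }
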
4304
4305 #define MAX_WAIT_CNT 1000
4306
4307 /* To stop a block, clear the enable bit and poll till it
4308  * clears.  tp->lock is held.
4309  */
4310 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4311 {
4312         unsigned int i;
4313         u32 val;
4314
4315         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4316                 switch (ofs) {
4317                 case RCVLSC_MODE:
4318                 case DMAC_MODE:
4319                 case MBFREE_MODE:
4320                 case BUFMGR_MODE:
4321                 case MEMARB_MODE:
4322                         /* We can't enable/disable these bits on the
4323                          * 5705/5750, so just report success.
4324                          */
4325                         return 0;
4326
4327                 default:
4328                         break;
4329                 }
4330         }
4331
4332         val = tr32(ofs);
4333         val &= ~enable_bit;
4334         tw32_f(ofs, val);
4335
4336         for (i = 0; i < MAX_WAIT_CNT; i++) {
4337                 udelay(100);
4338                 val = tr32(ofs);
4339                 if ((val & enable_bit) == 0)
4340                         break;
4341         }
4342
4343         if (i == MAX_WAIT_CNT && !silent) {
4344                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4345                        "ofs=%lx enable_bit=%x\n",
4346                        ofs, enable_bit);
4347                 return -ENODEV;
4348         }
4349
4350         return 0;
4351 }
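
tg3_stop_block() is the driver's basic "clear the enable bit, then poll until the hardware reports it clear, with a bounded wait" helper. A self-contained sketch of the same poll-until-clear pattern, using a fake register variable in place of the tr32()/tw32() MMIO accessors:

    #include <stdio.h>

    #define MAX_WAIT 1000

    /* Fake register; in the driver these are MMIO reads and writes. */
    static unsigned int fake_reg = 0x3;

    static unsigned int reg_read(void)    { return fake_reg; }
    static void reg_write(unsigned int v) { fake_reg = v; }

    /* Clear 'enable_bit' and poll until the hardware reports it clear. */
    static int stop_block(unsigned int enable_bit)
    {
            int i;

            reg_write(reg_read() & ~enable_bit);
            for (i = 0; i < MAX_WAIT; i++) {
                    if (!(reg_read() & enable_bit))
                            return 0;       /* block stopped */
                    /* udelay(100) in the real driver */
            }
            return -1;                      /* timed out */
    }

    int main(void)
    {
            printf("stop_block -> %d\n", stop_block(0x2));
            return 0;
    }
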
4352
4353 /* tp->lock is held. */
4354 static int tg3_abort_hw(struct tg3 *tp, int silent)
4355 {
4356         int i, err;
4357
4358         tg3_disable_ints(tp);
4359
4360         tp->rx_mode &= ~RX_MODE_ENABLE;
4361         tw32_f(MAC_RX_MODE, tp->rx_mode);
4362         udelay(10);
4363
4364         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4365         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4366         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4367         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4368         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4370
4371         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4372         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4373         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4374         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4375         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4376         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4377         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4378
4379         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4380         tw32_f(MAC_MODE, tp->mac_mode);
4381         udelay(40);
4382
4383         tp->tx_mode &= ~TX_MODE_ENABLE;
4384         tw32_f(MAC_TX_MODE, tp->tx_mode);
4385
4386         for (i = 0; i < MAX_WAIT_CNT; i++) {
4387                 udelay(100);
4388                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4389                         break;
4390         }
4391         if (i >= MAX_WAIT_CNT) {
4392                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4393                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4394                        tp->dev->name, tr32(MAC_TX_MODE));
4395                 err |= -ENODEV;
4396         }
4397
4398         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4399         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4400         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4401
4402         tw32(FTQ_RESET, 0xffffffff);
4403         tw32(FTQ_RESET, 0x00000000);
4404
4405         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4406         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4407
4408         if (tp->hw_status)
4409                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4410         if (tp->hw_stats)
4411                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4412
4413         return err;
4414 }
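
tg3_abort_hw() does not stop at the first failure; it runs every shutdown step and folds the return codes together with err |=, so the caller learns that something failed even though the whole sequence was attempted. A tiny sketch of that accumulate-and-continue error handling (the stop_a/b/c helpers are stand-ins, not driver functions):

    #include <stdio.h>

    /* Illustrative shutdown steps; each returns 0 or a negative error code. */
    static int stop_a(void) { return 0; }
    static int stop_b(void) { return -19; }   /* e.g. -ENODEV */
    static int stop_c(void) { return 0; }

    int main(void)
    {
            int err;

            /* Keep going through every step, but remember if any failed. */
            err  = stop_a();
            err |= stop_b();
            err |= stop_c();

            printf("abort sequence %s (err=%d)\n",
                   err ? "had failures" : "clean", err);
            return 0;
    }
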
4415
4416 /* tp->lock is held. */
4417 static int tg3_nvram_lock(struct tg3 *tp)
4418 {
4419         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4420                 int i;
4421
4422                 if (tp->nvram_lock_cnt == 0) {
4423                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4424                         for (i = 0; i < 8000; i++) {
4425                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4426                                         break;
4427                                 udelay(20);
4428                         }
4429                         if (i == 8000) {
4430                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4431                                 return -ENODEV;
4432                         }
4433                 }
4434                 tp->nvram_lock_cnt++;
4435         }
4436         return 0;
4437 }
4438
4439 /* tp->lock is held. */
4440 static void tg3_nvram_unlock(struct tg3 *tp)
4441 {
4442         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4443                 if (tp->nvram_lock_cnt > 0)
4444                         tp->nvram_lock_cnt--;
4445                 if (tp->nvram_lock_cnt == 0)
4446                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4447         }
4448 }
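
tg3_nvram_lock()/tg3_nvram_unlock() implement a recursion count: only the outermost lock actually requests the NVRAM software arbitration (SWARB) from the hardware, and only the final unlock releases it, so nested lock/unlock pairs are cheap. A standalone sketch of the counting scheme, with printf() standing in for the SWARB register accesses:

    #include <stdio.h>

    static int lock_cnt;

    /* Only the outermost lock touches the (simulated) arbitration register. */
    static int nvram_lock(void)
    {
            if (lock_cnt == 0)
                    printf("request hardware arbitration\n");
            lock_cnt++;
            return 0;
    }

    static void nvram_unlock(void)
    {
            if (lock_cnt > 0)
                    lock_cnt--;
            if (lock_cnt == 0)
                    printf("release hardware arbitration\n");
    }

    int main(void)
    {
            nvram_lock();
            nvram_lock();    /* nested: no extra hardware request */
            nvram_unlock();
            nvram_unlock();  /* count hits zero: hardware released */
            return 0;
    }
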
4449
4450 /* tp->lock is held. */
4451 static void tg3_enable_nvram_access(struct tg3 *tp)
4452 {
4453         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4454             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4455                 u32 nvaccess = tr32(NVRAM_ACCESS);
4456
4457                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4458         }
4459 }
4460
4461 /* tp->lock is held. */
4462 static void tg3_disable_nvram_access(struct tg3 *tp)
4463 {
4464         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4465             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4466                 u32 nvaccess = tr32(NVRAM_ACCESS);
4467
4468                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4469         }
4470 }
4471
4472 /* tp->lock is held. */
4473 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4474 {
4475         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4476                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4477                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4478
4479         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4480                 switch (kind) {
4481                 case RESET_KIND_INIT:
4482                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4483                                       DRV_STATE_START);
4484                         break;
4485
4486                 case RESET_KIND_SHUTDOWN:
4487                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4488                                       DRV_STATE_UNLOAD);
4489                         break;
4490
4491                 case RESET_KIND_SUSPEND:
4492                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4493                                       DRV_STATE_SUSPEND);
4494                         break;
4495
4496                 default:
4497                         break;
4498                 }
4499         }
4500 }
4501
4502 /* tp->lock is held. */
4503 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4504 {
4505         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4506                 switch (kind) {
4507                 case RESET_KIND_INIT:
4508                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4509                                       DRV_STATE_START_DONE);
4510                         break;
4511
4512                 case RESET_KIND_SHUTDOWN:
4513                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4514                                       DRV_STATE_UNLOAD_DONE);
4515                         break;
4516
4517                 default:
4518                         break;
4519                 }
4520         }
4521 }
4522
4523 /* tp->lock is held. */
4524 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4525 {
4526         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4527                 switch (kind) {
4528                 case RESET_KIND_INIT:
4529                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4530                                       DRV_STATE_START);
4531                         break;
4532
4533                 case RESET_KIND_SHUTDOWN:
4534                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4535                                       DRV_STATE_UNLOAD);
4536                         break;
4537
4538                 case RESET_KIND_SUSPEND:
4539                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4540                                       DRV_STATE_SUSPEND);
4541                         break;
4542
4543                 default:
4544                         break;
4545                 }
4546         }
4547 }
4548
4549 static void tg3_stop_fw(struct tg3 *);
4550
4551 /* tp->lock is held. */
4552 static int tg3_chip_reset(struct tg3 *tp)
4553 {
4554         u32 val;
4555         void (*write_op)(struct tg3 *, u32, u32);
4556         int i;
4557
4558         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4559                 tg3_nvram_lock(tp);
4560                 /* No matching tg3_nvram_unlock() after this because
4561                  * chip reset below will undo the nvram lock.
4562                  */
4563                 tp->nvram_lock_cnt = 0;
4564         }
4565
4566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4568             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4569                 tw32(GRC_FASTBOOT_PC, 0);
4570
4571         /*
4572          * We must avoid the readl() that normally takes place.
4573          * It locks up machines, causes machine checks, and does other
4574          * fun things.  So we temporarily disable the 5701
4575          * hardware workaround while we do the reset.
4576          */
4577         write_op = tp->write32;
4578         if (write_op == tg3_write_flush_reg32)
4579                 tp->write32 = tg3_write32;
4580
4581         /* do the reset */
4582         val = GRC_MISC_CFG_CORECLK_RESET;
4583
4584         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4585                 if (tr32(0x7e2c) == 0x60) {
4586                         tw32(0x7e2c, 0x20);
4587                 }
4588                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4589                         tw32(GRC_MISC_CFG, (1 << 29));
4590                         val |= (1 << 29);
4591                 }
4592         }
4593
4594         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4595                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4596         tw32(GRC_MISC_CFG, val);
4597
4598         /* restore 5701 hardware bug workaround write method */
4599         tp->write32 = write_op;
4600
4601         /* Unfortunately, we have to delay before the PCI read back.
4602          * Some 575X chips will not even respond to a PCI cfg access
4603          * when the reset command is given to the chip.
4604          *
4605          * How do these hardware designers expect things to work
4606          * properly if the PCI write is posted for a long period
4607          * of time?  It is always necessary to have some method by
4608          * which a register read back can occur to push out the
4609          * write that does the reset.
4610          *
4611          * For most tg3 variants the trick below was working.
4612          * Ho hum...
4613          */
4614         udelay(120);
4615
4616         /* Flush PCI posted writes.  The normal MMIO registers
4617          * are inaccessible at this time so this is the only
4618          * way to do this reliably (actually, this is no longer
4619          * the case, see above).  I tried to use indirect
4620          * register read/write but this upset some 5701 variants.
4621          */
4622         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4623
4624         udelay(120);
4625
4626         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4627                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4628                         int i;
4629                         u32 cfg_val;
4630
4631                         /* Wait for link training to complete.  */
4632                         for (i = 0; i < 5000; i++)
4633                                 udelay(100);
4634
4635                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4636                         pci_write_config_dword(tp->pdev, 0xc4,
4637                                                cfg_val | (1 << 15));
4638                 }
4639                 /* Set PCIE max payload size and clear error status.  */
4640                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4641         }
4642
4643         /* Re-enable indirect register accesses. */
4644         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4645                                tp->misc_host_ctrl);
4646
4647         /* Set MAX PCI retry to zero. */
4648         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4649         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4650             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4651                 val |= PCISTATE_RETRY_SAME_DMA;
4652         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4653
4654         pci_restore_state(tp->pdev);
4655
4656         /* Make sure PCI-X relaxed ordering bit is clear. */
4657         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4658         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4659         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4660
4661         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4662                 u32 val;
4663
4664                 /* Chip reset on 5780 will reset the MSI enable bit,
4665                  * so we need to restore it.
4666                  */
4667                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4668                         u16 ctrl;
4669
4670                         pci_read_config_word(tp->pdev,
4671                                              tp->msi_cap + PCI_MSI_FLAGS,
4672                                              &ctrl);
4673                         pci_write_config_word(tp->pdev,
4674                                               tp->msi_cap + PCI_MSI_FLAGS,
4675                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4676                         val = tr32(MSGINT_MODE);
4677                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4678                 }
4679
4680                 val = tr32(MEMARB_MODE);
4681                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4682
4683         } else
4684                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4685
4686         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4687                 tg3_stop_fw(tp);
4688                 tw32(0x5000, 0x400);
4689         }
4690
4691         tw32(GRC_MODE, tp->grc_mode);
4692
4693         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4694                 u32 val = tr32(0xc4);
4695
4696                 tw32(0xc4, val | (1 << 15));
4697         }
4698
4699         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4701                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4702                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4703                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4704                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4705         }
4706
4707         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4708                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4709                 tw32_f(MAC_MODE, tp->mac_mode);
4710         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4711                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4712                 tw32_f(MAC_MODE, tp->mac_mode);
4713         } else
4714                 tw32_f(MAC_MODE, 0);
4715         udelay(40);
4716
4717         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4718                 /* Wait for firmware initialization to complete. */
4719                 for (i = 0; i < 100000; i++) {
4720                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4721                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4722                                 break;
4723                         udelay(10);
4724                 }
4725                 if (i >= 100000) {
4726                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4727                                "firmware will not restart magic=%08x\n",
4728                                tp->dev->name, val);
4729                         return -ENODEV;
4730                 }
4731         }
4732
4733         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4734             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4735                 u32 val = tr32(0x7c00);
4736
4737                 tw32(0x7c00, val | (1 << 25));
4738         }
4739
4740         /* Reprobe ASF enable state.  */
4741         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4742         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4743         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4744         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4745                 u32 nic_cfg;
4746
4747                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4748                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4749                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4750                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4751                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4752                 }
4753         }
4754
4755         return 0;
4756 }
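
Part of tg3_chip_reset() is a mailbox handshake with the on-chip bootcode: the driver writes a magic value into NIC_SRAM_FIRMWARE_MBOX before the reset, and the firmware signals completion by writing back the one's complement, which the driver polls for with a bounded loop. The sketch below models that handshake with an ordinary variable and a made-up magic constant (0x12345678 is not the real NIC_SRAM_FIRMWARE_MBOX_MAGIC1 value):

    #include <stdint.h>
    #include <stdio.h>

    #define FW_MBOX_MAGIC 0x12345678u   /* illustrative value only */

    static uint32_t mbox;               /* stands in for the firmware mailbox */

    /* Driver side: post the magic before the reset... */
    static void pre_reset(void)     { mbox = FW_MBOX_MAGIC; }

    /* ...bootcode side: acknowledge by writing back the one's complement. */
    static void bootcode_done(void) { mbox = ~FW_MBOX_MAGIC; }

    /* Driver side: bounded poll for the acknowledgement. */
    static int wait_for_fw(void)
    {
            int i;

            for (i = 0; i < 100000; i++) {
                    if (mbox == ~FW_MBOX_MAGIC)
                            return 0;
                    bootcode_done();    /* simulate firmware progress */
            }
            return -1;
    }

    int main(void)
    {
            pre_reset();
            printf("firmware handshake: %s\n",
                   wait_for_fw() ? "timed out" : "ok");
            return 0;
    }
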
4757
4758 /* tp->lock is held. */
4759 static void tg3_stop_fw(struct tg3 *tp)
4760 {
4761         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4762                 u32 val;
4763                 int i;
4764
4765                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4766                 val = tr32(GRC_RX_CPU_EVENT);
4767                 val |= (1 << 14);
4768                 tw32(GRC_RX_CPU_EVENT, val);
4769
4770                 /* Wait for RX cpu to ACK the event.  */
4771                 for (i = 0; i < 100; i++) {
4772                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4773                                 break;
4774                         udelay(1);
4775                 }
4776         }
4777 }
4778
4779 /* tp->lock is held. */
4780 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4781 {
4782         int err;
4783
4784         tg3_stop_fw(tp);
4785
4786         tg3_write_sig_pre_reset(tp, kind);
4787
4788         tg3_abort_hw(tp, silent);
4789         err = tg3_chip_reset(tp);
4790
4791         tg3_write_sig_legacy(tp, kind);
4792         tg3_write_sig_post_reset(tp, kind);
4793
4794         if (err)
4795                 return err;
4796
4797         return 0;
4798 }
4799
4800 #define TG3_FW_RELEASE_MAJOR    0x0
4801 #define TG3_FW_RELEASE_MINOR    0x0
4802 #define TG3_FW_RELEASE_FIX      0x0
4803 #define TG3_FW_START_ADDR       0x08000000
4804 #define TG3_FW_TEXT_ADDR        0x08000000
4805 #define TG3_FW_TEXT_LEN         0x9c0
4806 #define TG3_FW_RODATA_ADDR      0x080009c0
4807 #define TG3_FW_RODATA_LEN       0x60
4808 #define TG3_FW_DATA_ADDR        0x08000a40
4809 #define TG3_FW_DATA_LEN         0x20
4810 #define TG3_FW_SBSS_ADDR        0x08000a60
4811 #define TG3_FW_SBSS_LEN         0xc
4812 #define TG3_FW_BSS_ADDR         0x08000a70
4813 #define TG3_FW_BSS_LEN          0x10
4814
4815 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4816         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4817         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4818         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4819         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4820         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4821         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4822         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4823         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4824         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4825         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4826         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4827         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4828         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4829         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4830         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4831         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4832         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4833         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4834         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4835         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4836         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4837         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4838         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4839         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4840         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4841         0, 0, 0, 0, 0, 0,
4842         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4843         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4844         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4845         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4846         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4847         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4848         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4849         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4850         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4851         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4852         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4853         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4854         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4855         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4856         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4857         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4858         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4859         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4860         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4861         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4862         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4863         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4864         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4865         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4866         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4867         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4868         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4869         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4870         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4871         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4872         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4873         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4874         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4875         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4876         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4877         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4878         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4879         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4880         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4881         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4882         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4883         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4884         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4885         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4886         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4887         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4888         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4889         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4890         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4891         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4892         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4893         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4894         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4895         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4896         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4897         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4898         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4899         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4900         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4901         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4902         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4903         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4904         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4905         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4906         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4907 };
4908
4909 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4910         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4911         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4912         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4913         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4914         0x00000000
4915 };
4916
4917 #if 0 /* All zeros, don't eat up space with it. */
4918 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4919         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4920         0x00000000, 0x00000000, 0x00000000, 0x00000000
4921 };
4922 #endif
4923
4924 #define RX_CPU_SCRATCH_BASE     0x30000
4925 #define RX_CPU_SCRATCH_SIZE     0x04000
4926 #define TX_CPU_SCRATCH_BASE     0x34000
4927 #define TX_CPU_SCRATCH_SIZE     0x04000
4928
4929 /* tp->lock is held. */
4930 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4931 {
4932         int i;
4933
4934         BUG_ON(offset == TX_CPU_BASE &&
4935             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4936
4937         if (offset == RX_CPU_BASE) {
4938                 for (i = 0; i < 10000; i++) {
4939                         tw32(offset + CPU_STATE, 0xffffffff);
4940                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4941                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4942                                 break;
4943                 }
4944
4945                 tw32(offset + CPU_STATE, 0xffffffff);
4946                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4947                 udelay(10);
4948         } else {
4949                 for (i = 0; i < 10000; i++) {
4950                         tw32(offset + CPU_STATE, 0xffffffff);
4951                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4952                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4953                                 break;
4954                 }
4955         }
4956
4957         if (i >= 10000) {
4958                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4959                        "and %s CPU\n",
4960                        tp->dev->name,
4961                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4962                 return -ENODEV;
4963         }
4964
4965         /* Clear firmware's nvram arbitration. */
4966         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4967                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4968         return 0;
4969 }
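
tg3_halt_cpu() treats a request to halt the TX CPU on a 5705-class chip as a driver bug and uses BUG_ON() (this is the conversion this file illustrates), while tg3_load_firmware_cpu() just below checks the same impossible combination, prints a message, and returns -EINVAL so the caller can recover. A standalone sketch contrasting the two error-handling styles, with assert() standing in for BUG_ON() and a generic -1 in place of -EINVAL:

    #include <stdio.h>
    #include <assert.h>

    #define RX_CPU 0
    #define TX_CPU 1

    /* Style used by tg3_halt_cpu(): treat the impossible case as a
     * programming error and stop hard (BUG_ON in the kernel, assert()
     * in this sketch). */
    static int halt_cpu(int cpu, int is_5705_class)
    {
            assert(!(cpu == TX_CPU && is_5705_class));
            return 0;
    }

    /* Style used by tg3_load_firmware_cpu(): report the error and let
     * the caller recover. */
    static int load_cpu_fw(int cpu, int is_5705_class)
    {
            if (cpu == TX_CPU && is_5705_class) {
                    fprintf(stderr, "no TX cpu on this chip\n");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("load_cpu_fw -> %d\n", load_cpu_fw(TX_CPU, 1));
            printf("halt_cpu    -> %d\n", halt_cpu(RX_CPU, 1));
            return 0;
    }
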
4970
4971 struct fw_info {
4972         unsigned int text_base;
4973         unsigned int text_len;
4974         u32 *text_data;
4975         unsigned int rodata_base;
4976         unsigned int rodata_len;
4977         u32 *rodata_data;
4978         unsigned int data_base;
4979         unsigned int data_len;
4980         u32 *data_data;
4981 };
4982
4983 /* tp->lock is held. */
4984 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4985                                  int cpu_scratch_size, struct fw_info *info)
4986 {
4987         int err, lock_err, i;
4988         void (*write_op)(struct tg3 *, u32, u32);
4989
4990         if (cpu_base == TX_CPU_BASE &&
4991             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4992                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4993                        "TX cpu firmware on %s which is 5705.\n",
4994                        tp->dev->name);
4995                 return -EINVAL;
4996         }
4997
4998         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4999                 write_op = tg3_write_mem;
5000         else
5001                 write_op = tg3_write_indirect_reg32;
5002
5003         /* It is possible that bootcode is still loading at this point.
5004          * Get the nvram lock first before halting the cpu.
5005          */
5006         lock_err = tg3_nvram_lock(tp);
5007         err = tg3_halt_cpu(tp, cpu_base);
5008         if (!lock_err)
5009                 tg3_nvram_unlock(tp);
5010         if (err)
5011                 goto out;
5012
5013         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5014                 write_op(tp, cpu_scratch_base + i, 0);
5015         tw32(cpu_base + CPU_STATE, 0xffffffff);
5016         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5017         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5018                 write_op(tp, (cpu_scratch_base +
5019                               (info->text_base & 0xffff) +
5020                               (i * sizeof(u32))),
5021                          (info->text_data ?
5022                           info->text_data[i] : 0));
5023         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5024                 write_op(tp, (cpu_scratch_base +
5025                               (info->rodata_base & 0xffff) +
5026                               (i * sizeof(u32))),
5027                          (info->rodata_data ?
5028                           info->rodata_data[i] : 0));
5029         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5030                 write_op(tp, (cpu_scratch_base +
5031                               (info->data_base & 0xffff) +
5032                               (i * sizeof(u32))),
5033                          (info->data_data ?
5034                           info->data_data[i] : 0));
5035
5036         err = 0;
5037
5038 out:
5039         return err;
5040 }
5041
5042 /* tp->lock is held. */
5043 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5044 {
5045         struct fw_info info;
5046         int err, i;
5047
5048         info.text_base = TG3_FW_TEXT_ADDR;
5049         info.text_len = TG3_FW_TEXT_LEN;
5050         info.text_data = &tg3FwText[0];
5051         info.rodata_base = TG3_FW_RODATA_ADDR;
5052         info.rodata_len = TG3_FW_RODATA_LEN;
5053         info.rodata_data = &tg3FwRodata[0];
5054         info.data_base = TG3_FW_DATA_ADDR;
5055         info.data_len = TG3_FW_DATA_LEN;
5056         info.data_data = NULL;
5057
5058         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5059                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5060                                     &info);
5061         if (err)
5062                 return err;
5063
5064         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5065                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5066                                     &info);
5067         if (err)
5068                 return err;
5069
5070         /* Now start up only the RX cpu. */
5071         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5072         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5073
5074         for (i = 0; i < 5; i++) {
5075                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5076                         break;
5077                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5078                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5079                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5080                 udelay(1000);
5081         }
5082         if (i >= 5) {
5083                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5084                        "to set RX CPU PC, is %08x should be %08x\n",
5085                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5086                        TG3_FW_TEXT_ADDR);
5087                 return -ENODEV;
5088         }
5089         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5090         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5091
5092         return 0;
5093 }
5094
5095 #if TG3_TSO_SUPPORT != 0
5096
5097 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5098 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5099 #define TG3_TSO_FW_RELEASE_FIX          0x0
5100 #define TG3_TSO_FW_START_ADDR           0x08000000
5101 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5102 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5103 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5104 #define TG3_TSO_FW_RODATA_LEN           0x60
5105 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5106 #define TG3_TSO_FW_DATA_LEN             0x30
5107 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5108 #define TG3_TSO_FW_SBSS_LEN             0x2c
5109 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5110 #define TG3_TSO_FW_BSS_LEN              0x894
5111
5112 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5113         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5114         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5115         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5116         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5117         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5118         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5119         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5120         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5121         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5122         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5123         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5124         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5125         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5126         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5127         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5128         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5129         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5130         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5131         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5132         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5133         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5134         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5135         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5136         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5137         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5138         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5139         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5140         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5141         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5142         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5143         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5144         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5145         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5146         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5147         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5148         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5149         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5150         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5151         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5152         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5153         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5154         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5155         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5156         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5157         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5158         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5159         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5160         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5161         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5162         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5163         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5164         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5165         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5166         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5167         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5168         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5169         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5170         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5171         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5172         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5173         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5174         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5175         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5176         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5177         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5178         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5179         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5180         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5181         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5182         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5183         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5184         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5185         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5186         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5187         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5188         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5189         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5190         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5191         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5192         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5193         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5194         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5195         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5196         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5197         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5198         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5199         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5200         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5201         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5202         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5203         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5204         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5205         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5206         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5207         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5208         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5209         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5210         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5211         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5212         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5213         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5214         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5215         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5216         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5217         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5218         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5219         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5220         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5221         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5222         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5223         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5224         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5225         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5226         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5227         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5228         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5229         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5230         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5231         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5232         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5233         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5234         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5235         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5236         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5237         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5238         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5239         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5240         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5241         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5242         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5243         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5244         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5245         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5246         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5247         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5248         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5249         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5250         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5251         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5252         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5253         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5254         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5255         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5256         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5257         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5258         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5259         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5260         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5261         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5262         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5263         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5264         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5265         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5266         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5267         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5268         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5269         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5270         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5271         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5272         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5273         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5274         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5275         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5276         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5277         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5278         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5279         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5280         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5281         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5282         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5283         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5284         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5285         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5286         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5287         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5288         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5289         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5290         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5291         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5292         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5293         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5294         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5295         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5296         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5297         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5298         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5299         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5300         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5301         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5302         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5303         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5304         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5305         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5306         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5307         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5308         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5309         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5310         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5311         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5312         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5313         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5314         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5315         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5316         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5317         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5318         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5319         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5320         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5321         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5322         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5323         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5324         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5325         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5326         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5327         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5328         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5329         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5330         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5331         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5332         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5333         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5334         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5335         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5336         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5337         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5338         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5339         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5340         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5341         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5342         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5343         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5344         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5345         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5346         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5347         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5348         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5349         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5350         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5351         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5352         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5353         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5354         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5355         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5356         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5357         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5358         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5359         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5360         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5361         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5362         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5363         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5364         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5365         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5366         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5367         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5368         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5369         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5370         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5371         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5372         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5373         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5374         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5375         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5376         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5377         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5378         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5379         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5380         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5381         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5382         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5383         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5384         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5385         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5386         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5387         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5388         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5389         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5390         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5391         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5392         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5393         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5394         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5395         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5396         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5397 };
5398
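     /* Read-only data segment of the TSO firmware image; tg3_load_tso_firmware()
      * below copies it to TG3_TSO_FW_RODATA_ADDR.
      */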
5399 static u32 tg3TsoFwRodata[] = {
5400         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5401         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5402         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5403         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5404         0x00000000,
5405 };
5406
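     /* Initialized data segment of the TSO firmware image, loaded at
      * TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware() below.
      */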
5407 static u32 tg3TsoFwData[] = {
5408         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5409         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5410         0x00000000,
5411 };
5412
5413 /* 5705 needs a special version of the TSO firmware.  */
5414 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5415 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5416 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5417 #define TG3_TSO5_FW_START_ADDR          0x00010000
5418 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5419 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5420 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5421 #define TG3_TSO5_FW_RODATA_LEN          0x50
5422 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5423 #define TG3_TSO5_FW_DATA_LEN            0x20
5424 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5425 #define TG3_TSO5_FW_SBSS_LEN            0x28
5426 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5427 #define TG3_TSO5_FW_BSS_LEN             0x88
5428
5429 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5430         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5431         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5432         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5433         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5434         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5435         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5436         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5437         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5438         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5439         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5440         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5441         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5442         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5443         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5444         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5445         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5446         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5447         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5448         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5449         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5450         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5451         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5452         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5453         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5454         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5455         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5456         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5457         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5458         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5459         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5460         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5461         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5462         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5463         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5464         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5465         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5466         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5467         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5468         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5469         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5470         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5471         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5472         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5473         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5474         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5475         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5476         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5477         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5478         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5479         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5480         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5481         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5482         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5483         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5484         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5485         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5486         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5487         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5488         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5489         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5490         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5491         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5492         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5493         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5494         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5495         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5496         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5497         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5498         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5499         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5500         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5501         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5502         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5503         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5504         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5505         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5506         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5507         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5508         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5509         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5510         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5511         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5512         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5513         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5514         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5515         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5516         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5517         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5518         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5519         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5520         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5521         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5522         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5523         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5524         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5525         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5526         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5527         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5528         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5529         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5530         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5531         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5532         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5533         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5534         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5535         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5536         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5537         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5538         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5539         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5540         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5541         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5542         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5543         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5544         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5545         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5546         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5547         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5548         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5549         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5550         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5551         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5552         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5553         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5554         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5555         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5556         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5557         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5558         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5559         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5560         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5561         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5562         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5563         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5564         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5565         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5566         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5567         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5568         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5569         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5570         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5571         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5572         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5573         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5574         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5575         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5576         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5577         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5578         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5579         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5580         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5581         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5582         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5583         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5584         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5585         0x00000000, 0x00000000, 0x00000000,
5586 };
5587
5588 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5589         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5590         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5591         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5592         0x00000000, 0x00000000, 0x00000000,
5593 };
5594
5595 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5596         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5597         0x00000000, 0x00000000, 0x00000000,
5598 };
5599
5600 /* tp->lock is held. */
5601 static int tg3_load_tso_firmware(struct tg3 *tp)
5602 {
5603         struct fw_info info;
5604         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5605         int err, i;
5606
5607         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5608                 return 0;
5609
5610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5611                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5612                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5613                 info.text_data = &tg3Tso5FwText[0];
5614                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5615                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5616                 info.rodata_data = &tg3Tso5FwRodata[0];
5617                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5618                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5619                 info.data_data = &tg3Tso5FwData[0];
5620                 cpu_base = RX_CPU_BASE;
5621                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5622                 cpu_scratch_size = (info.text_len +
5623                                     info.rodata_len +
5624                                     info.data_len +
5625                                     TG3_TSO5_FW_SBSS_LEN +
5626                                     TG3_TSO5_FW_BSS_LEN);
5627         } else {
5628                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5629                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5630                 info.text_data = &tg3TsoFwText[0];
5631                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5632                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5633                 info.rodata_data = &tg3TsoFwRodata[0];
5634                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5635                 info.data_len = TG3_TSO_FW_DATA_LEN;
5636                 info.data_data = &tg3TsoFwData[0];
5637                 cpu_base = TX_CPU_BASE;
5638                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5639                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5640         }
5641
5642         err = tg3_load_firmware_cpu(tp, cpu_base,
5643                                     cpu_scratch_base, cpu_scratch_size,
5644                                     &info);
5645         if (err)
5646                 return err;
5647
5648         /* Now startup the cpu. */
5649         tw32(cpu_base + CPU_STATE, 0xffffffff);
5650         tw32_f(cpu_base + CPU_PC,    info.text_base);
5651
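             /* Allow up to five halt/rewrite attempts for the CPU to latch
              * the new program counter.
              */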
5652         for (i = 0; i < 5; i++) {
5653                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5654                         break;
5655                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5656                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5657                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5658                 udelay(1000);
5659         }
5660         if (i >= 5) {
5661                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5662                        "CPU PC for %s: is %08x, should be %08x\n",
5663                        tp->dev->name, tr32(cpu_base + CPU_PC),
5664                        info.text_base);
5665                 return -ENODEV;
5666         }
5667         tw32(cpu_base + CPU_STATE, 0xffffffff);
5668         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5669         return 0;
5670 }
5671
5672 #endif /* TG3_TSO_SUPPORT != 0 */
5673
5674 /* tp->lock is held. */
5675 static void __tg3_set_mac_addr(struct tg3 *tp)
5676 {
5677         u32 addr_high, addr_low;
5678         int i;
5679
5680         addr_high = ((tp->dev->dev_addr[0] << 8) |
5681                      tp->dev->dev_addr[1]);
5682         addr_low = ((tp->dev->dev_addr[2] << 24) |
5683                     (tp->dev->dev_addr[3] << 16) |
5684                     (tp->dev->dev_addr[4] <<  8) |
5685                     (tp->dev->dev_addr[5] <<  0));
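             /* Program all four MAC address register pairs with the same
              * station address.
              */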
5686         for (i = 0; i < 4; i++) {
5687                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5688                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5689         }
5690
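             /* 5703 and 5704 also expose twelve extended address slots;
              * mirror the station address into each of them.
              */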
5691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5692             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5693                 for (i = 0; i < 12; i++) {
5694                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5695                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5696                 }
5697         }
5698
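             /* Seed the transmit backoff generator from the byte sum of the
              * MAC address.
              */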
5699         addr_high = (tp->dev->dev_addr[0] +
5700                      tp->dev->dev_addr[1] +
5701                      tp->dev->dev_addr[2] +
5702                      tp->dev->dev_addr[3] +
5703                      tp->dev->dev_addr[4] +
5704                      tp->dev->dev_addr[5]) &
5705                 TX_BACKOFF_SEED_MASK;
5706         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5707 }
5708
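     /* Change the interface MAC address: validate it, copy it into
      * dev->dev_addr and, if the device is running, reprogram the MAC
      * registers under tp->lock.
      */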
5709 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5710 {
5711         struct tg3 *tp = netdev_priv(dev);
5712         struct sockaddr *addr = p;
5713
5714         if (!is_valid_ether_addr(addr->sa_data))
5715                 return -EINVAL;
5716
5717         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5718
5719         if (!netif_running(dev))
5720                 return 0;
5721
5722         spin_lock_bh(&tp->lock);
5723         __tg3_set_mac_addr(tp);
5724         spin_unlock_bh(&tp->lock);
5725
5726         return 0;
5727 }
5728
5729 /* tp->lock is held. */
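     /* Write one TG3_BDINFO block (64-bit host ring DMA address, the
      * maxlen/flags word and, on pre-5705 chips, the NIC-memory ring
      * address) into NIC memory at bdinfo_addr.
      */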
5730 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5731                            dma_addr_t mapping, u32 maxlen_flags,
5732                            u32 nic_addr)
5733 {
5734         tg3_write_mem(tp,
5735                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5736                       ((u64) mapping >> 32));
5737         tg3_write_mem(tp,
5738                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5739                       ((u64) mapping & 0xffffffff));
5740         tg3_write_mem(tp,
5741                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5742                        maxlen_flags);
5743
5744         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5745                 tg3_write_mem(tp,
5746                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5747                               nic_addr);
5748 }
5749
5750 static void __tg3_set_rx_mode(struct net_device *);
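     /* Program the host coalescing engine from the ethtool_coalesce
      * parameters.  The per-interrupt tick and statistics-block coalescing
      * writes are skipped on 5705-and-later chips.
      */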
5751 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5752 {
5753         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5754         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5755         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5756         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5757         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5758                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5759                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5760         }
5761         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5762         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5763         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5764                 u32 val = ec->stats_block_coalesce_usecs;
5765
5766                 if (!netif_carrier_ok(tp->dev))
5767                         val = 0;
5768
5769                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5770         }
5771 }
5772
5773 /* tp->lock is held. */
5774 static int tg3_reset_hw(struct tg3 *tp)
5775 {
5776         u32 val, rdmac_mode;
5777         int i, err, limit;
5778
5779         tg3_disable_ints(tp);
5780
5781         tg3_stop_fw(tp);
5782
5783         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5784
5785         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5786                 tg3_abort_hw(tp, 1);
5787         }
5788
5789         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5790                 tg3_phy_reset(tp);
5791
5792         err = tg3_chip_reset(tp);
5793         if (err)
5794                 return err;
5795
5796         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5797
5798         /* This works around an issue with Athlon chipsets on
5799          * B3 tigon3 silicon.  This bit has no effect on any
5800          * other revision.  But do not set this on PCI Express
5801          * chips.
5802          */
5803         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5804                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5805         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5806
5807         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5808             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5809                 val = tr32(TG3PCI_PCISTATE);
5810                 val |= PCISTATE_RETRY_SAME_DMA;
5811                 tw32(TG3PCI_PCISTATE, val);
5812         }
5813
5814         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5815                 /* Enable some hw fixes.  */
5816                 val = tr32(TG3PCI_MSI_DATA);
5817                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5818                 tw32(TG3PCI_MSI_DATA, val);
5819         }
5820
5821         /* Descriptor ring init may make accesses to the
5822          * NIC SRAM area to setup the TX descriptors, so we
5823          * can only do this after the hardware has been
5824          * successfully reset.
5825          */
5826         tg3_init_rings(tp);
5827
5828         /* This value is determined during the probe time DMA
5829          * engine test, tg3_test_dma.
5830          */
5831         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5832
5833         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5834                           GRC_MODE_4X_NIC_SEND_RINGS |
5835                           GRC_MODE_NO_TX_PHDR_CSUM |
5836                           GRC_MODE_NO_RX_PHDR_CSUM);
5837         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5838         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5839                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5840         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5841                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5842
5843         tw32(GRC_MODE,
5844              tp->grc_mode |
5845              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5846
5847         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5848         val = tr32(GRC_MISC_CFG);
5849         val &= ~0xff;
5850         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5851         tw32(GRC_MISC_CFG, val);
5852
5853         /* Initialize MBUF/DESC pool. */
5854         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5855                 /* Do nothing.  */
5856         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5857                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5858                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5859                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5860                 else
5861                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5862                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5863                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5864         }
5865 #if TG3_TSO_SUPPORT != 0
5866         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5867                 int fw_len;
5868
5869                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5870                           TG3_TSO5_FW_RODATA_LEN +
5871                           TG3_TSO5_FW_DATA_LEN +
5872                           TG3_TSO5_FW_SBSS_LEN +
5873                           TG3_TSO5_FW_BSS_LEN);
5874                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5875                 tw32(BUFMGR_MB_POOL_ADDR,
5876                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5877                 tw32(BUFMGR_MB_POOL_SIZE,
5878                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5879         }
5880 #endif
5881
5882         if (tp->dev->mtu <= ETH_DATA_LEN) {
5883                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5884                      tp->bufmgr_config.mbuf_read_dma_low_water);
5885                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5886                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5887                 tw32(BUFMGR_MB_HIGH_WATER,
5888                      tp->bufmgr_config.mbuf_high_water);
5889         } else {
5890                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5891                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5892                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5893                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5894                 tw32(BUFMGR_MB_HIGH_WATER,
5895                      tp->bufmgr_config.mbuf_high_water_jumbo);
5896         }
5897         tw32(BUFMGR_DMA_LOW_WATER,
5898              tp->bufmgr_config.dma_low_water);
5899         tw32(BUFMGR_DMA_HIGH_WATER,
5900              tp->bufmgr_config.dma_high_water);
5901
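         /* Enable the buffer manager and poll up to 20 ms for the enable
          * bit to take effect.
          */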
5902         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5903         for (i = 0; i < 2000; i++) {
5904                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5905                         break;
5906                 udelay(10);
5907         }
5908         if (i >= 2000) {
5909                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5910                        tp->dev->name);
5911                 return -ENODEV;
5912         }
5913
5914         /* Setup replenish threshold. */
5915         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5916
5917         /* Initialize TG3_BDINFO's at:
5918          *  RCVDBDI_STD_BD:     standard eth size rx ring
5919          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5920          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5921          *
5922          * like so:
5923          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5924          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5925          *                              ring attribute flags
5926          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5927          *
5928          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5929          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5930          *
5931          * The size of each ring is fixed in the firmware, but the location is
5932          * configurable.
5933          */
5934         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5935              ((u64) tp->rx_std_mapping >> 32));
5936         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5937              ((u64) tp->rx_std_mapping & 0xffffffff));
5938         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5939              NIC_SRAM_RX_BUFFER_DESC);
5940
5941         /* Don't even try to program the JUMBO/MINI buffer descriptor
5942          * configs on 5705.
5943          */
5944         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5945                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5946                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5947         } else {
5948                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5949                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5950
5951                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5952                      BDINFO_FLAGS_DISABLED);
5953
5954                 /* Setup replenish threshold. */
5955                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5956
5957                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5958                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5959                              ((u64) tp->rx_jumbo_mapping >> 32));
5960                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5961                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5962                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5963                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5964                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5965                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5966                 } else {
5967                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5968                              BDINFO_FLAGS_DISABLED);
5969                 }
5970
5971         }
5972
5973         /* There is only one send ring on 5705/5750, no need to explicitly
5974          * disable the others.
5975          */
5976         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5977                 /* Clear out send RCB ring in SRAM. */
5978                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5979                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5980                                       BDINFO_FLAGS_DISABLED);
5981         }
5982
5983         tp->tx_prod = 0;
5984         tp->tx_cons = 0;
5985         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5986         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5987
5988         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5989                        tp->tx_desc_mapping,
5990                        (TG3_TX_RING_SIZE <<
5991                         BDINFO_FLAGS_MAXLEN_SHIFT),
5992                        NIC_SRAM_TX_BUFFER_DESC);
5993
5994         /* There is only one receive return ring on 5705/5750, no need
5995          * to explicitly disable the others.
5996          */
5997         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5998                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5999                      i += TG3_BDINFO_SIZE) {
6000                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6001                                       BDINFO_FLAGS_DISABLED);
6002                 }
6003         }
6004
6005         tp->rx_rcb_ptr = 0;
6006         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6007
6008         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6009                        tp->rx_rcb_mapping,
6010                        (TG3_RX_RCB_RING_SIZE(tp) <<
6011                         BDINFO_FLAGS_MAXLEN_SHIFT),
6012                        0);
6013
6014         tp->rx_std_ptr = tp->rx_pending;
6015         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6016                      tp->rx_std_ptr);
6017
6018         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6019                                                 tp->rx_jumbo_pending : 0;
6020         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6021                      tp->rx_jumbo_ptr);
6022
6023         /* Initialize MAC address and backoff seed. */
6024         __tg3_set_mac_addr(tp);
6025
6026         /* MTU + ethernet header + FCS + optional VLAN tag */
6027         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6028
6029         /* The slot time is changed by tg3_setup_phy if we
6030          * run at gigabit with half duplex.
6031          */
6032         tw32(MAC_TX_LENGTHS,
6033              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6034              (6 << TX_LENGTHS_IPG_SHIFT) |
6035              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6036
6037         /* Receive rules. */
6038         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6039         tw32(RCVLPC_CONFIG, 0x0181);
6040
6041         /* Calculate the RDMAC_MODE setting early; we need it to determine
6042          * the RCVLPC_STATS_ENABLE mask.
6043          */
6044         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6045                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6046                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6047                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6048                       RDMAC_MODE_LNGREAD_ENAB);
6049         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6050                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6051
6052         /* If statement applies to 5705 and 5750 PCI devices only */
6053         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6054              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6055             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6056                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6057                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6058                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6059                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6060                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6061                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6062                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6063                 }
6064         }
6065
6066         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6067                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6068
6069 #if TG3_TSO_SUPPORT != 0
6070         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6071                 rdmac_mode |= (1 << 27);
6072 #endif
6073
6074         /* Receive/send statistics. */
6075         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6076             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6077                 val = tr32(RCVLPC_STATS_ENABLE);
6078                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6079                 tw32(RCVLPC_STATS_ENABLE, val);
6080         } else {
6081                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6082         }
6083         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6084         tw32(SNDDATAI_STATSENAB, 0xffffff);
6085         tw32(SNDDATAI_STATSCTRL,
6086              (SNDDATAI_SCTRL_ENABLE |
6087               SNDDATAI_SCTRL_FASTUPD));
6088
6089         /* Setup host coalescing engine. */
6090         tw32(HOSTCC_MODE, 0);
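             /* Wait for the coalescing engine to report disabled before it is
              * reprogrammed below.
              */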
6091         for (i = 0; i < 2000; i++) {
6092                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6093                         break;
6094                 udelay(10);
6095         }
6096
6097         __tg3_set_coalesce(tp, &tp->coal);
6098
6099         /* set status block DMA address */
6100         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6101              ((u64) tp->status_mapping >> 32));
6102         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6103              ((u64) tp->status_mapping & 0xffffffff));
6104
6105         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6106                 /* Status/statistics block address.  See tg3_timer,
6107                  * the tg3_periodic_fetch_stats call there, and
6108                  * tg3_get_stats to see how this works for 5705/5750 chips.
6109                  */
6110                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6111                      ((u64) tp->stats_mapping >> 32));
6112                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6113                      ((u64) tp->stats_mapping & 0xffffffff));
6114                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6115                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6116         }
6117
6118         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6119
6120         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6121         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6122         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6123                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6124
6125         /* Clear statistics/status block in chip, and status block in ram. */
6126         for (i = NIC_SRAM_STATS_BLK;
6127              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6128              i += sizeof(u32)) {
6129                 tg3_write_mem(tp, i, 0);
6130                 udelay(40);
6131         }
6132         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6133
6134         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6135                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6136                 /* reset to prevent losing 1st rx packet intermittently */
6137                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6138                 udelay(10);
6139         }
6140
6141         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6142                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6143         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6144         udelay(40);
6145
6146         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6147          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6148          * register to preserve the GPIO settings for LOMs. The GPIOs,
6149          * whether used as inputs or outputs, are set by boot code after
6150          * reset.
6151          */
6152         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6153                 u32 gpio_mask;
6154
6155                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6156                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6157
6158                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6159                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6160                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6161
6162                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6163                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6164
6165                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6166
6167                 /* GPIO1 must be driven high for eeprom write protect */
6168                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6169                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6170         }
6171         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6172         udelay(100);
6173
6174         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6175         tp->last_tag = 0;
6176
6177         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6178                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6179                 udelay(40);
6180         }
6181
6182         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6183                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6184                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6185                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6186                WDMAC_MODE_LNGREAD_ENAB);
6187
6188         /* If statement applies to 5705 and 5750 PCI devices only */
6189         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6190              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6192                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6193                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6194                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6195                         /* nothing */
6196                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6197                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6198                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6199                         val |= WDMAC_MODE_RX_ACCEL;
6200                 }
6201         }
6202
6203         /* Enable host coalescing bug fix */
6204         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6205             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6206                 val |= (1 << 29);
6207
6208         tw32_f(WDMAC_MODE, val);
6209         udelay(40);
6210
6211         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6212                 val = tr32(TG3PCI_X_CAPS);
6213                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6214                         val &= ~PCIX_CAPS_BURST_MASK;
6215                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6216                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6217                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6218                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6219                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6220                                 val |= (tp->split_mode_max_reqs <<
6221                                         PCIX_CAPS_SPLIT_SHIFT);
6222                 }
6223                 tw32(TG3PCI_X_CAPS, val);
6224         }
6225
6226         tw32_f(RDMAC_MODE, rdmac_mode);
6227         udelay(40);
6228
6229         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6230         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6231                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6232         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6233         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6234         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6235         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6236         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6237 #if TG3_TSO_SUPPORT != 0
6238         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6239                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6240 #endif
6241         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6242         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6243
6244         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6245                 err = tg3_load_5701_a0_firmware_fix(tp);
6246                 if (err)
6247                         return err;
6248         }
6249
6250 #if TG3_TSO_SUPPORT != 0
6251         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6252                 err = tg3_load_tso_firmware(tp);
6253                 if (err)
6254                         return err;
6255         }
6256 #endif
6257
6258         tp->tx_mode = TX_MODE_ENABLE;
6259         tw32_f(MAC_TX_MODE, tp->tx_mode);
6260         udelay(100);
6261
6262         tp->rx_mode = RX_MODE_ENABLE;
6263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6264                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6265
6266         tw32_f(MAC_RX_MODE, tp->rx_mode);
6267         udelay(10);
6268
6269         if (tp->link_config.phy_is_low_power) {
6270                 tp->link_config.phy_is_low_power = 0;
6271                 tp->link_config.speed = tp->link_config.orig_speed;
6272                 tp->link_config.duplex = tp->link_config.orig_duplex;
6273                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6274         }
6275
6276         tp->mi_mode = MAC_MI_MODE_BASE;
6277         tw32_f(MAC_MI_MODE, tp->mi_mode);
6278         udelay(80);
6279
6280         tw32(MAC_LED_CTRL, tp->led_ctrl);
6281
6282         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6283         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6284                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6285                 udelay(10);
6286         }
6287         tw32_f(MAC_RX_MODE, tp->rx_mode);
6288         udelay(10);
6289
6290         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6291                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6292                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6293                         /* Set drive transmission level to 1.2V  */
6294                         /* only if the signal pre-emphasis bit is not set  */
6295                         val = tr32(MAC_SERDES_CFG);
6296                         val &= 0xfffff000;
6297                         val |= 0x880;
6298                         tw32(MAC_SERDES_CFG, val);
6299                 }
6300                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6301                         tw32(MAC_SERDES_CFG, 0x616000);
6302         }
6303
6304         /* Prevent chip from dropping frames when flow control
6305          * is enabled.
6306          */
6307         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6308
6309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6310             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6311                 /* Use hardware link auto-negotiation */
6312                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6313         }
6314
6315         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6316             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6317                 u32 tmp;
6318
6319                 tmp = tr32(SERDES_RX_CTRL);
6320                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6321                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6322                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6323                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6324         }
6325
6326         err = tg3_setup_phy(tp, 1);
6327         if (err)
6328                 return err;
6329
6330         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6331                 u32 tmp;
6332
6333                 /* Clear CRC stats. */
6334                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6335                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6336                         tg3_readphy(tp, 0x14, &tmp);
6337                 }
6338         }
6339
6340         __tg3_set_rx_mode(tp->dev);
6341
6342         /* Initialize receive rules. */
6343         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6344         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6345         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6346         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6347
6348         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6349             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6350                 limit = 8;
6351         else
6352                 limit = 16;
6353         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6354                 limit -= 4;
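             /* The cases below fall through on purpose: starting at the
              * computed limit, every unused receive rule down to slot 4 is
              * cleared.  When ASF is enabled the limit is lowered by four,
              * presumably leaving the top rule slots to the firmware.
              */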
6355         switch (limit) {
6356         case 16:
6357                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6358         case 15:
6359                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6360         case 14:
6361                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6362         case 13:
6363                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6364         case 12:
6365                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6366         case 11:
6367                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6368         case 10:
6369                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6370         case 9:
6371                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6372         case 8:
6373                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6374         case 7:
6375                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6376         case 6:
6377                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6378         case 5:
6379                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6380         case 4:
6381                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6382         case 3:
6383                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6384         case 2:
6385         case 1:
6386
6387         default:
6388                 break;
6389         }
6390
6391         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6392
6393         return 0;
6394 }
6395
6396 /* Called at device open time to get the chip ready for
6397  * packet processing.  Invoked with tp->lock held.
6398  */
6399 static int tg3_init_hw(struct tg3 *tp)
6400 {
6401         int err;
6402
6403         /* Force the chip into D0. */
6404         err = tg3_set_power_state(tp, PCI_D0);
6405         if (err)
6406                 goto out;
6407
6408         tg3_switch_clocks(tp);
6409
6410         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6411
6412         err = tg3_reset_hw(tp);
6413
6414 out:
6415         return err;
6416 }
6417
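/* Accumulate a 32-bit statistics register into a 64-bit software counter:
 * add the value read to the low word and, if the low word wrapped (ends up
 * smaller than the value just added), carry one into the high word.
 */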
6418 #define TG3_STAT_ADD32(PSTAT, REG) \
6419 do {    u32 __val = tr32(REG); \
6420         (PSTAT)->low += __val; \
6421         if ((PSTAT)->low < __val) \
6422                 (PSTAT)->high += 1; \
6423 } while (0)
6424
6425 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6426 {
6427         struct tg3_hw_stats *sp = tp->hw_stats;
6428
6429         if (!netif_carrier_ok(tp->dev))
6430                 return;
6431
6432         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6433         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6434         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6435         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6436         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6437         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6438         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6439         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6440         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6441         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6442         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6443         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6444         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6445
6446         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6447         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6448         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6449         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6450         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6451         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6452         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6453         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6454         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6455         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6456         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6457         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6458         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6459         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6460 }
6461
6462 static void tg3_timer(unsigned long __opaque)
6463 {
6464         struct tg3 *tp = (struct tg3 *) __opaque;
6465
6466         if (tp->irq_sync)
6467                 goto restart_timer;
6468
6469         spin_lock(&tp->lock);
6470
6471         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6472                 /* All of this garbage is needed because, when using
6473                  * non-tagged IRQ status, the mailbox/status_block
6474                  * protocol the chip uses with the CPU is race prone.
6475                  */
6476                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6477                         tw32(GRC_LOCAL_CTRL,
6478                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6479                 } else {
6480                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6481                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6482                 }
6483
6484                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6485                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6486                         spin_unlock(&tp->lock);
6487                         schedule_work(&tp->reset_task);
6488                         return;
6489                 }
6490         }
6491
6492         /* This part only runs once per second. */
6493         if (!--tp->timer_counter) {
6494                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6495                         tg3_periodic_fetch_stats(tp);
6496
6497                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6498                         u32 mac_stat;
6499                         int phy_event;
6500
6501                         mac_stat = tr32(MAC_STATUS);
6502
6503                         phy_event = 0;
6504                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6505                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6506                                         phy_event = 1;
6507                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6508                                 phy_event = 1;
6509
6510                         if (phy_event)
6511                                 tg3_setup_phy(tp, 0);
6512                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6513                         u32 mac_stat = tr32(MAC_STATUS);
6514                         int need_setup = 0;
6515
6516                         if (netif_carrier_ok(tp->dev) &&
6517                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6518                                 need_setup = 1;
6519                         }
6520                         if (!netif_carrier_ok(tp->dev) &&
6521                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6522                                          MAC_STATUS_SIGNAL_DET))) {
6523                                 need_setup = 1;
6524                         }
6525                         if (need_setup) {
6526                                 tw32_f(MAC_MODE,
6527                                      (tp->mac_mode &
6528                                       ~MAC_MODE_PORT_MODE_MASK));
6529                                 udelay(40);
6530                                 tw32_f(MAC_MODE, tp->mac_mode);
6531                                 udelay(40);
6532                                 tg3_setup_phy(tp, 0);
6533                         }
6534                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6535                         tg3_serdes_parallel_detect(tp);
6536
6537                 tp->timer_counter = tp->timer_multiplier;
6538         }
6539
6540         /* Heartbeat is only sent once every 2 seconds.  */
6541         if (!--tp->asf_counter) {
6542                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6543                         u32 val;
6544
6545                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6546                                       FWCMD_NICDRV_ALIVE2);
6547                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6548                         /* 5 seconds timeout */
6549                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
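                             /* Bit 14 of GRC_RX_CPU_EVENT is presumed to
                              * signal the driver-alive heartbeat event to the
                              * ASF firmware running on the RX CPU.
                              */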
6550                         val = tr32(GRC_RX_CPU_EVENT);
6551                         val |= (1 << 14);
6552                         tw32(GRC_RX_CPU_EVENT, val);
6553                 }
6554                 tp->asf_counter = tp->asf_multiplier;
6555         }
6556
6557         spin_unlock(&tp->lock);
6558
6559 restart_timer:
6560         tp->timer.expires = jiffies + tp->timer_offset;
6561         add_timer(&tp->timer);
6562 }
6563
6564 static int tg3_request_irq(struct tg3 *tp)
6565 {
6566         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6567         unsigned long flags;
6568         struct net_device *dev = tp->dev;
6569
6570         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6571                 fn = tg3_msi;
6572                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6573                         fn = tg3_msi_1shot;
6574                 flags = SA_SAMPLE_RANDOM;
6575         } else {
6576                 fn = tg3_interrupt;
6577                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6578                         fn = tg3_interrupt_tagged;
6579                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6580         }
6581         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6582 }
6583
6584 static int tg3_test_interrupt(struct tg3 *tp)
6585 {
6586         struct net_device *dev = tp->dev;
6587         int err, i;
6588         u32 int_mbox = 0;
6589
6590         if (!netif_running(dev))
6591                 return -ENODEV;
6592
6593         tg3_disable_ints(tp);
6594
6595         free_irq(tp->pdev->irq, dev);
6596
6597         err = request_irq(tp->pdev->irq, tg3_test_isr,
6598                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6599         if (err)
6600                 return err;
6601
6602         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6603         tg3_enable_ints(tp);
6604
6605         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6606                HOSTCC_MODE_NOW);
6607
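             /* Poll the interrupt mailbox for up to ~50ms (5 x 10ms),
              * waiting for the forced test interrupt to show up there.
              */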
6608         for (i = 0; i < 5; i++) {
6609                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6610                                         TG3_64BIT_REG_LOW);
6611                 if (int_mbox != 0)
6612                         break;
6613                 msleep(10);
6614         }
6615
6616         tg3_disable_ints(tp);
6617
6618         free_irq(tp->pdev->irq, dev);
6619         
6620         err = tg3_request_irq(tp);
6621
6622         if (err)
6623                 return err;
6624
6625         if (int_mbox != 0)
6626                 return 0;
6627
6628         return -EIO;
6629 }
6630
6631 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6632  * mode is successfully restored.
6633  */
6634 static int tg3_test_msi(struct tg3 *tp)
6635 {
6636         struct net_device *dev = tp->dev;
6637         int err;
6638         u16 pci_cmd;
6639
6640         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6641                 return 0;
6642
6643         /* Turn off SERR reporting in case MSI terminates with Master
6644          * Abort.
6645          */
6646         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6647         pci_write_config_word(tp->pdev, PCI_COMMAND,
6648                               pci_cmd & ~PCI_COMMAND_SERR);
6649
6650         err = tg3_test_interrupt(tp);
6651
6652         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6653
6654         if (!err)
6655                 return 0;
6656
6657         /* other failures */
6658         if (err != -EIO)
6659                 return err;
6660
6661         /* MSI test failed, go back to INTx mode */
6662         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6663                "switching to INTx mode. Please report this failure to "
6664                "the PCI maintainer and include system chipset information.\n",
6665                        tp->dev->name);
6666
6667         free_irq(tp->pdev->irq, dev);
6668         pci_disable_msi(tp->pdev);
6669
6670         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6671
6672         err = tg3_request_irq(tp);
6673         if (err)
6674                 return err;
6675
6676         /* Need to reset the chip because the MSI cycle may have terminated
6677          * with Master Abort.
6678          */
6679         tg3_full_lock(tp, 1);
6680
6681         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6682         err = tg3_init_hw(tp);
6683
6684         tg3_full_unlock(tp);
6685
6686         if (err)
6687                 free_irq(tp->pdev->irq, dev);
6688
6689         return err;
6690 }
6691
6692 static int tg3_open(struct net_device *dev)
6693 {
6694         struct tg3 *tp = netdev_priv(dev);
6695         int err;
6696
6697         tg3_full_lock(tp, 0);
6698
6699         err = tg3_set_power_state(tp, PCI_D0);
6700         if (err) {
                     tg3_full_unlock(tp);
6701                 return err;
             }
6702
6703         tg3_disable_ints(tp);
6704         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6705
6706         tg3_full_unlock(tp);
6707
6708         /* The placement of this call is tied
6709          * to the setup and use of Host TX descriptors.
6710          */
6711         err = tg3_alloc_consistent(tp);
6712         if (err)
6713                 return err;
6714
6715         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6716             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6717             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6718             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6719               (tp->pdev_peer == tp->pdev))) {
6720                 /* All MSI supporting chips should support tagged
6721                  * status.  Assert that this is the case.
6722                  */
6723                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6724                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6725                                "Not using MSI.\n", tp->dev->name);
6726                 } else if (pci_enable_msi(tp->pdev) == 0) {
6727                         u32 msi_mode;
6728
6729                         msi_mode = tr32(MSGINT_MODE);
6730                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6731                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6732                 }
6733         }
6734         err = tg3_request_irq(tp);
6735
6736         if (err) {
6737                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6738                         pci_disable_msi(tp->pdev);
6739                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6740                 }
6741                 tg3_free_consistent(tp);
6742                 return err;
6743         }
6744
6745         tg3_full_lock(tp, 0);
6746
6747         err = tg3_init_hw(tp);
6748         if (err) {
6749                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6750                 tg3_free_rings(tp);
6751         } else {
6752                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6753                         tp->timer_offset = HZ;
6754                 else
6755                         tp->timer_offset = HZ / 10;
6756
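                     /* With tagged status the timer fires once per second,
                      * otherwise every 100ms; timer_counter then counts
                      * ticks per one-second interval and asf_counter per
                      * two-second heartbeat interval.
                      */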
6757                 BUG_ON(tp->timer_offset > HZ);
6758                 tp->timer_counter = tp->timer_multiplier =
6759                         (HZ / tp->timer_offset);
6760                 tp->asf_counter = tp->asf_multiplier =
6761                         ((HZ / tp->timer_offset) * 2);
6762
6763                 init_timer(&tp->timer);
6764                 tp->timer.expires = jiffies + tp->timer_offset;
6765                 tp->timer.data = (unsigned long) tp;
6766                 tp->timer.function = tg3_timer;
6767         }
6768
6769         tg3_full_unlock(tp);
6770
6771         if (err) {
6772                 free_irq(tp->pdev->irq, dev);
6773                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6774                         pci_disable_msi(tp->pdev);
6775                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6776                 }
6777                 tg3_free_consistent(tp);
6778                 return err;
6779         }
6780
6781         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6782                 err = tg3_test_msi(tp);
6783
6784                 if (err) {
6785                         tg3_full_lock(tp, 0);
6786
6787                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6788                                 pci_disable_msi(tp->pdev);
6789                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6790                         }
6791                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6792                         tg3_free_rings(tp);
6793                         tg3_free_consistent(tp);
6794
6795                         tg3_full_unlock(tp);
6796
6797                         return err;
6798                 }
6799
6800                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6801                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
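                                     /* Register 0x7c04 appears to be the
                                      * PCIe transaction configuration
                                      * register; bit 29 enables one-shot
                                      * MSI on these chips.
                                      */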
6802                                 u32 val = tr32(0x7c04);
6803
6804                                 tw32(0x7c04, val | (1 << 29));
6805                         }
6806                 }
6807         }
6808
6809         tg3_full_lock(tp, 0);
6810
6811         add_timer(&tp->timer);
6812         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6813         tg3_enable_ints(tp);
6814
6815         tg3_full_unlock(tp);
6816
6817         netif_start_queue(dev);
6818
6819         return 0;
6820 }
6821
6822 #if 0
6823 /*static*/ void tg3_dump_state(struct tg3 *tp)
6824 {
6825         u32 val32, val32_2, val32_3, val32_4, val32_5;
6826         u16 val16;
6827         int i;
6828
6829         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6830         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6831         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6832                val16, val32);
6833
6834         /* MAC block */
6835         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6836                tr32(MAC_MODE), tr32(MAC_STATUS));
6837         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6838                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6839         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6840                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6841         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6842                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6843
6844         /* Send data initiator control block */
6845         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6846                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6847         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6848                tr32(SNDDATAI_STATSCTRL));
6849
6850         /* Send data completion control block */
6851         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6852
6853         /* Send BD ring selector block */
6854         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6855                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6856
6857         /* Send BD initiator control block */
6858         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6859                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6860
6861         /* Send BD completion control block */
6862         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6863
6864         /* Receive list placement control block */
6865         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6866                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6867         printk("       RCVLPC_STATSCTRL[%08x]\n",
6868                tr32(RCVLPC_STATSCTRL));
6869
6870         /* Receive data and receive BD initiator control block */
6871         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6872                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6873
6874         /* Receive data completion control block */
6875         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6876                tr32(RCVDCC_MODE));
6877
6878         /* Receive BD initiator control block */
6879         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6880                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6881
6882         /* Receive BD completion control block */
6883         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6884                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6885
6886         /* Receive list selector control block */
6887         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6888                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6889
6890         /* Mbuf cluster free block */
6891         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6892                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6893
6894         /* Host coalescing control block */
6895         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6896                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6897         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6898                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6899                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6900         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6901                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6902                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6903         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6904                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6905         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6906                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6907
6908         /* Memory arbiter control block */
6909         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6910                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6911
6912         /* Buffer manager control block */
6913         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6914                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6915         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6916                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6917         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6918                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6919                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6920                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6921
6922         /* Read DMA control block */
6923         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6924                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6925
6926         /* Write DMA control block */
6927         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6928                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6929
6930         /* DMA completion block */
6931         printk("DEBUG: DMAC_MODE[%08x]\n",
6932                tr32(DMAC_MODE));
6933
6934         /* GRC block */
6935         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6936                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6937         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6938                tr32(GRC_LOCAL_CTRL));
6939
6940         /* TG3_BDINFOs */
6941         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6942                tr32(RCVDBDI_JUMBO_BD + 0x0),
6943                tr32(RCVDBDI_JUMBO_BD + 0x4),
6944                tr32(RCVDBDI_JUMBO_BD + 0x8),
6945                tr32(RCVDBDI_JUMBO_BD + 0xc));
6946         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6947                tr32(RCVDBDI_STD_BD + 0x0),
6948                tr32(RCVDBDI_STD_BD + 0x4),
6949                tr32(RCVDBDI_STD_BD + 0x8),
6950                tr32(RCVDBDI_STD_BD + 0xc));
6951         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6952                tr32(RCVDBDI_MINI_BD + 0x0),
6953                tr32(RCVDBDI_MINI_BD + 0x4),
6954                tr32(RCVDBDI_MINI_BD + 0x8),
6955                tr32(RCVDBDI_MINI_BD + 0xc));
6956
6957         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6958         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6959         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6960         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6961         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6962                val32, val32_2, val32_3, val32_4);
6963
6964         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6965         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6966         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6967         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6968         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6969                val32, val32_2, val32_3, val32_4);
6970
6971         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6972         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6973         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6974         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6975         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6976         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6977                val32, val32_2, val32_3, val32_4, val32_5);
6978
6979         /* SW status block */
6980         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6981                tp->hw_status->status,
6982                tp->hw_status->status_tag,
6983                tp->hw_status->rx_jumbo_consumer,
6984                tp->hw_status->rx_consumer,
6985                tp->hw_status->rx_mini_consumer,
6986                tp->hw_status->idx[0].rx_producer,
6987                tp->hw_status->idx[0].tx_consumer);
6988
6989         /* SW statistics block */
6990         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6991                ((u32 *)tp->hw_stats)[0],
6992                ((u32 *)tp->hw_stats)[1],
6993                ((u32 *)tp->hw_stats)[2],
6994                ((u32 *)tp->hw_stats)[3]);
6995
6996         /* Mailboxes */
6997         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6998                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6999                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7000                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7001                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7002
7003         /* NIC side send descriptors. */
7004         for (i = 0; i < 6; i++) {
7005                 unsigned long txd;
7006
7007                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7008                         + (i * sizeof(struct tg3_tx_buffer_desc));
7009                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7010                        i,
7011                        readl(txd + 0x0), readl(txd + 0x4),
7012                        readl(txd + 0x8), readl(txd + 0xc));
7013         }
7014
7015         /* NIC side RX descriptors. */
7016         for (i = 0; i < 6; i++) {
7017                 unsigned long rxd;
7018
7019                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7020                         + (i * sizeof(struct tg3_rx_buffer_desc));
7021                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7022                        i,
7023                        readl(rxd + 0x0), readl(rxd + 0x4),
7024                        readl(rxd + 0x8), readl(rxd + 0xc));
7025                 rxd += (4 * sizeof(u32));
7026                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7027                        i,
7028                        readl(rxd + 0x0), readl(rxd + 0x4),
7029                        readl(rxd + 0x8), readl(rxd + 0xc));
7030         }
7031
7032         for (i = 0; i < 6; i++) {
7033                 unsigned long rxd;
7034
7035                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7036                         + (i * sizeof(struct tg3_rx_buffer_desc));
7037                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7038                        i,
7039                        readl(rxd + 0x0), readl(rxd + 0x4),
7040                        readl(rxd + 0x8), readl(rxd + 0xc));
7041                 rxd += (4 * sizeof(u32));
7042                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7043                        i,
7044                        readl(rxd + 0x0), readl(rxd + 0x4),
7045                        readl(rxd + 0x8), readl(rxd + 0xc));
7046         }
7047 }
7048 #endif
7049
7050 static struct net_device_stats *tg3_get_stats(struct net_device *);
7051 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7052
7053 static int tg3_close(struct net_device *dev)
7054 {
7055         struct tg3 *tp = netdev_priv(dev);
7056
7057         /* Calling flush_scheduled_work() may deadlock because
7058          * linkwatch_event() may be on the workqueue and it will try to get
7059          * the rtnl_lock which we are holding.
7060          */
7061         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7062                 msleep(1);
7063
7064         netif_stop_queue(dev);
7065
7066         del_timer_sync(&tp->timer);
7067
7068         tg3_full_lock(tp, 1);
7069 #if 0
7070         tg3_dump_state(tp);
7071 #endif
7072
7073         tg3_disable_ints(tp);
7074
7075         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7076         tg3_free_rings(tp);
7077         tp->tg3_flags &=
7078                 ~(TG3_FLAG_INIT_COMPLETE |
7079                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7080
7081         tg3_full_unlock(tp);
7082
7083         free_irq(tp->pdev->irq, dev);
7084         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7085                 pci_disable_msi(tp->pdev);
7086                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7087         }
7088
7089         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7090                sizeof(tp->net_stats_prev));
7091         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7092                sizeof(tp->estats_prev));
7093
7094         tg3_free_consistent(tp);
7095
7096         tg3_set_power_state(tp, PCI_D3hot);
7097
7098         netif_carrier_off(tp->dev);
7099
7100         return 0;
7101 }
7102
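/* Collapse a 64-bit hardware statistic into an unsigned long: on 32-bit
 * hosts only the low word is reported, on 64-bit hosts the full value.
 */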
7103 static inline unsigned long get_stat64(tg3_stat64_t *val)
7104 {
7105         unsigned long ret;
7106
7107 #if (BITS_PER_LONG == 32)
7108         ret = val->low;
7109 #else
7110         ret = ((u64)val->high << 32) | ((u64)val->low);
7111 #endif
7112         return ret;
7113 }
7114
7115 static unsigned long calc_crc_errors(struct tg3 *tp)
7116 {
7117         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7118
7119         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7120             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7121              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7122                 u32 val;
7123
7124                 spin_lock_bh(&tp->lock);
7125                 if (!tg3_readphy(tp, 0x1e, &val)) {
7126                         tg3_writephy(tp, 0x1e, val | 0x8000);
7127                         tg3_readphy(tp, 0x14, &val);
7128                 } else
7129                         val = 0;
7130                 spin_unlock_bh(&tp->lock);
7131
7132                 tp->phy_crc_errors += val;
7133
7134                 return tp->phy_crc_errors;
7135         }
7136
7137         return get_stat64(&hw_stats->rx_fcs_errors);
7138 }
7139
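/* Each ethtool statistic is reported as the snapshot saved at the last
 * close (estats_prev) plus the live hardware counter accumulated since the
 * device was reopened.
 */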
7140 #define ESTAT_ADD(member) \
7141         estats->member =        old_estats->member + \
7142                                 get_stat64(&hw_stats->member)
7143
7144 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7145 {
7146         struct tg3_ethtool_stats *estats = &tp->estats;
7147         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7148         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7149
7150         if (!hw_stats)
7151                 return old_estats;
7152
7153         ESTAT_ADD(rx_octets);
7154         ESTAT_ADD(rx_fragments);
7155         ESTAT_ADD(rx_ucast_packets);
7156         ESTAT_ADD(rx_mcast_packets);
7157         ESTAT_ADD(rx_bcast_packets);
7158         ESTAT_ADD(rx_fcs_errors);
7159         ESTAT_ADD(rx_align_errors);
7160         ESTAT_ADD(rx_xon_pause_rcvd);
7161         ESTAT_ADD(rx_xoff_pause_rcvd);
7162         ESTAT_ADD(rx_mac_ctrl_rcvd);
7163         ESTAT_ADD(rx_xoff_entered);
7164         ESTAT_ADD(rx_frame_too_long_errors);
7165         ESTAT_ADD(rx_jabbers);
7166         ESTAT_ADD(rx_undersize_packets);
7167         ESTAT_ADD(rx_in_length_errors);
7168         ESTAT_ADD(rx_out_length_errors);
7169         ESTAT_ADD(rx_64_or_less_octet_packets);
7170         ESTAT_ADD(rx_65_to_127_octet_packets);
7171         ESTAT_ADD(rx_128_to_255_octet_packets);
7172         ESTAT_ADD(rx_256_to_511_octet_packets);
7173         ESTAT_ADD(rx_512_to_1023_octet_packets);
7174         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7175         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7176         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7177         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7178         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7179
7180         ESTAT_ADD(tx_octets);
7181         ESTAT_ADD(tx_collisions);
7182         ESTAT_ADD(tx_xon_sent);
7183         ESTAT_ADD(tx_xoff_sent);
7184         ESTAT_ADD(tx_flow_control);
7185         ESTAT_ADD(tx_mac_errors);
7186         ESTAT_ADD(tx_single_collisions);
7187         ESTAT_ADD(tx_mult_collisions);
7188         ESTAT_ADD(tx_deferred);
7189         ESTAT_ADD(tx_excessive_collisions);
7190         ESTAT_ADD(tx_late_collisions);
7191         ESTAT_ADD(tx_collide_2times);
7192         ESTAT_ADD(tx_collide_3times);
7193         ESTAT_ADD(tx_collide_4times);
7194         ESTAT_ADD(tx_collide_5times);
7195         ESTAT_ADD(tx_collide_6times);
7196         ESTAT_ADD(tx_collide_7times);
7197         ESTAT_ADD(tx_collide_8times);
7198         ESTAT_ADD(tx_collide_9times);
7199         ESTAT_ADD(tx_collide_10times);
7200         ESTAT_ADD(tx_collide_11times);
7201         ESTAT_ADD(tx_collide_12times);
7202         ESTAT_ADD(tx_collide_13times);
7203         ESTAT_ADD(tx_collide_14times);
7204         ESTAT_ADD(tx_collide_15times);
7205         ESTAT_ADD(tx_ucast_packets);
7206         ESTAT_ADD(tx_mcast_packets);
7207         ESTAT_ADD(tx_bcast_packets);
7208         ESTAT_ADD(tx_carrier_sense_errors);
7209         ESTAT_ADD(tx_discards);
7210         ESTAT_ADD(tx_errors);
7211
7212         ESTAT_ADD(dma_writeq_full);
7213         ESTAT_ADD(dma_write_prioq_full);
7214         ESTAT_ADD(rxbds_empty);
7215         ESTAT_ADD(rx_discards);
7216         ESTAT_ADD(rx_errors);
7217         ESTAT_ADD(rx_threshold_hit);
7218
7219         ESTAT_ADD(dma_readq_full);
7220         ESTAT_ADD(dma_read_prioq_full);
7221         ESTAT_ADD(tx_comp_queue_full);
7222
7223         ESTAT_ADD(ring_set_send_prod_index);
7224         ESTAT_ADD(ring_status_update);
7225         ESTAT_ADD(nic_irqs);
7226         ESTAT_ADD(nic_avoided_irqs);
7227         ESTAT_ADD(nic_tx_threshold_hit);
7228
7229         return estats;
7230 }
7231
7232 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7233 {
7234         struct tg3 *tp = netdev_priv(dev);
7235         struct net_device_stats *stats = &tp->net_stats;
7236         struct net_device_stats *old_stats = &tp->net_stats_prev;
7237         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7238
7239         if (!hw_stats)
7240                 return old_stats;
7241
7242         stats->rx_packets = old_stats->rx_packets +
7243                 get_stat64(&hw_stats->rx_ucast_packets) +
7244                 get_stat64(&hw_stats->rx_mcast_packets) +
7245                 get_stat64(&hw_stats->rx_bcast_packets);
7246                 
7247         stats->tx_packets = old_stats->tx_packets +
7248                 get_stat64(&hw_stats->tx_ucast_packets) +
7249                 get_stat64(&hw_stats->tx_mcast_packets) +
7250                 get_stat64(&hw_stats->tx_bcast_packets);
7251
7252         stats->rx_bytes = old_stats->rx_bytes +
7253                 get_stat64(&hw_stats->rx_octets);
7254         stats->tx_bytes = old_stats->tx_bytes +
7255                 get_stat64(&hw_stats->tx_octets);
7256
7257         stats->rx_errors = old_stats->rx_errors +
7258                 get_stat64(&hw_stats->rx_errors);
7259         stats->tx_errors = old_stats->tx_errors +
7260                 get_stat64(&hw_stats->tx_errors) +
7261                 get_stat64(&hw_stats->tx_mac_errors) +
7262                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7263                 get_stat64(&hw_stats->tx_discards);
7264
7265         stats->multicast = old_stats->multicast +
7266                 get_stat64(&hw_stats->rx_mcast_packets);
7267         stats->collisions = old_stats->collisions +
7268                 get_stat64(&hw_stats->tx_collisions);
7269
7270         stats->rx_length_errors = old_stats->rx_length_errors +
7271                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7272                 get_stat64(&hw_stats->rx_undersize_packets);
7273
7274         stats->rx_over_errors = old_stats->rx_over_errors +
7275                 get_stat64(&hw_stats->rxbds_empty);
7276         stats->rx_frame_errors = old_stats->rx_frame_errors +
7277                 get_stat64(&hw_stats->rx_align_errors);
7278         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7279                 get_stat64(&hw_stats->tx_discards);
7280         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7281                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7282
7283         stats->rx_crc_errors = old_stats->rx_crc_errors +
7284                 calc_crc_errors(tp);
7285
7286         stats->rx_missed_errors = old_stats->rx_missed_errors +
7287                 get_stat64(&hw_stats->rx_discards);
7288
7289         return stats;
7290 }
7291
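/* Bit-by-bit little-endian CRC-32 (polynomial 0xedb88320) over the buffer,
 * returned inverted.  Used below to hash multicast addresses into the MAC
 * hash filter registers.
 */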
7292 static inline u32 calc_crc(unsigned char *buf, int len)
7293 {
7294         u32 reg;
7295         u32 tmp;
7296         int j, k;
7297
7298         reg = 0xffffffff;
7299
7300         for (j = 0; j < len; j++) {
7301                 reg ^= buf[j];
7302
7303                 for (k = 0; k < 8; k++) {
7304                         tmp = reg & 0x01;
7305
7306                         reg >>= 1;
7307
7308                         if (tmp) {
7309                                 reg ^= 0xedb88320;
7310                         }
7311                 }
7312         }
7313
7314         return ~reg;
7315 }
7316
7317 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7318 {
7319         /* accept or reject all multicast frames */
7320         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7321         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7322         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7323         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7324 }
7325
7326 static void __tg3_set_rx_mode(struct net_device *dev)
7327 {
7328         struct tg3 *tp = netdev_priv(dev);
7329         u32 rx_mode;
7330
7331         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7332                                   RX_MODE_KEEP_VLAN_TAG);
7333
7334         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7335          * flag clear.
7336          */
7337 #if TG3_VLAN_TAG_USED
7338         if (!tp->vlgrp &&
7339             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7340                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7341 #else
7342         /* By definition, VLAN is always disabled in this
7343          * case.
7344          */
7345         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7346                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7347 #endif
7348
7349         if (dev->flags & IFF_PROMISC) {
7350                 /* Promiscuous mode. */
7351                 rx_mode |= RX_MODE_PROMISC;
7352         } else if (dev->flags & IFF_ALLMULTI) {
7353                 /* Accept all multicast. */
7354                 tg3_set_multi (tp, 1);
7355         } else if (dev->mc_count < 1) {
7356                 /* Reject all multicast. */
7357                 tg3_set_multi (tp, 0);
7358         } else {
7359                 /* Accept one or more multicast(s). */
7360                 struct dev_mc_list *mclist;
7361                 unsigned int i;
7362                 u32 mc_filter[4] = { 0, };
7363                 u32 regidx;
7364                 u32 bit;
7365                 u32 crc;
7366
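                     /* For each address, the low 7 bits of the inverted CRC
                      * select a filter bit: bits 6:5 pick one of the four
                      * 32-bit hash registers and bits 4:0 pick the bit
                      * within it.
                      */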
7367                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7368                      i++, mclist = mclist->next) {
7369
7370                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7371                         bit = ~crc & 0x7f;
7372                         regidx = (bit & 0x60) >> 5;
7373                         bit &= 0x1f;
7374                         mc_filter[regidx] |= (1 << bit);
7375                 }
7376
7377                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7378                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7379                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7380                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7381         }
7382
7383         if (rx_mode != tp->rx_mode) {
7384                 tp->rx_mode = rx_mode;
7385                 tw32_f(MAC_RX_MODE, rx_mode);
7386                 udelay(10);
7387         }
7388 }
7389
7390 static void tg3_set_rx_mode(struct net_device *dev)
7391 {
7392         struct tg3 *tp = netdev_priv(dev);
7393
7394         if (!netif_running(dev))
7395                 return;
7396
7397         tg3_full_lock(tp, 0);
7398         __tg3_set_rx_mode(dev);
7399         tg3_full_unlock(tp);
7400 }
7401
7402 #define TG3_REGDUMP_LEN         (32 * 1024)
7403
7404 static int tg3_get_regs_len(struct net_device *dev)
7405 {
7406         return TG3_REGDUMP_LEN;
7407 }
7408
7409 static void tg3_get_regs(struct net_device *dev,
7410                 struct ethtool_regs *regs, void *_p)
7411 {
7412         u32 *p = _p;
7413         struct tg3 *tp = netdev_priv(dev);
7414         u8 *orig_p = _p;
7415         int i;
7416
7417         regs->version = 0;
7418
7419         memset(p, 0, TG3_REGDUMP_LEN);
7420
7421         if (tp->link_config.phy_is_low_power)
7422                 return;
7423
7424         tg3_full_lock(tp, 0);
7425
7426 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7427 #define GET_REG32_LOOP(base,len)                \
7428 do {    p = (u32 *)(orig_p + (base));           \
7429         for (i = 0; i < len; i += 4)            \
7430                 __GET_REG32((base) + i);        \
7431 } while (0)
7432 #define GET_REG32_1(reg)                        \
7433 do {    p = (u32 *)(orig_p + (reg));            \
7434         __GET_REG32((reg));                     \
7435 } while (0)
7436
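             /* Copy each register block into the dump buffer at its hardware
              * offset; ranges not listed here stay zeroed from the memset
              * above.
              */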
7437         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7438         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7439         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7440         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7441         GET_REG32_1(SNDDATAC_MODE);
7442         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7443         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7444         GET_REG32_1(SNDBDC_MODE);
7445         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7446         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7447         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7448         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7449         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7450         GET_REG32_1(RCVDCC_MODE);
7451         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7452         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7453         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7454         GET_REG32_1(MBFREE_MODE);
7455         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7456         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7457         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7458         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7459         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7460         GET_REG32_1(RX_CPU_MODE);
7461         GET_REG32_1(RX_CPU_STATE);
7462         GET_REG32_1(RX_CPU_PGMCTR);
7463         GET_REG32_1(RX_CPU_HWBKPT);
7464         GET_REG32_1(TX_CPU_MODE);
7465         GET_REG32_1(TX_CPU_STATE);
7466         GET_REG32_1(TX_CPU_PGMCTR);
7467         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7468         GET_REG32_LOOP(FTQ_RESET, 0x120);
7469         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7470         GET_REG32_1(DMAC_MODE);
7471         GET_REG32_LOOP(GRC_MODE, 0x4c);
7472         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7473                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7474
7475 #undef __GET_REG32
7476 #undef GET_REG32_LOOP
7477 #undef GET_REG32_1
7478
7479         tg3_full_unlock(tp);
7480 }
7481
7482 static int tg3_get_eeprom_len(struct net_device *dev)
7483 {
7484         struct tg3 *tp = netdev_priv(dev);
7485
7486         return tp->nvram_size;
7487 }
7488
7489 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7490 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7491
7492 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7493 {
7494         struct tg3 *tp = netdev_priv(dev);
7495         int ret;
7496         u8  *pd;
7497         u32 i, offset, len, val, b_offset, b_count;
7498
7499         if (tp->link_config.phy_is_low_power)
7500                 return -EAGAIN;
7501
7502         offset = eeprom->offset;
7503         len = eeprom->len;
7504         eeprom->len = 0;
7505
7506         eeprom->magic = TG3_EEPROM_MAGIC;
7507
7508         if (offset & 3) {
7509                 /* adjustments to start on required 4 byte boundary */
7510                 b_offset = offset & 3;
7511                 b_count = 4 - b_offset;
7512                 if (b_count > len) {
7513                         /* i.e. offset=1 len=2 */
7514                         b_count = len;
7515                 }
7516                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7517                 if (ret)
7518                         return ret;
7519                 val = cpu_to_le32(val);
7520                 memcpy(data, ((char*)&val) + b_offset, b_count);
7521                 len -= b_count;
7522                 offset += b_count;
7523                 eeprom->len += b_count;
7524         }
7525
7526         /* read bytes up to the last 4 byte boundary */
7527         pd = &data[eeprom->len];
7528         for (i = 0; i < (len - (len & 3)); i += 4) {
7529                 ret = tg3_nvram_read(tp, offset + i, &val);
7530                 if (ret) {
7531                         eeprom->len += i;
7532                         return ret;
7533                 }
7534                 val = cpu_to_le32(val);
7535                 memcpy(pd + i, &val, 4);
7536         }
7537         eeprom->len += i;
7538
7539         if (len & 3) {
7540                 /* read last bytes not ending on 4 byte boundary */
7541                 pd = &data[eeprom->len];
7542                 b_count = len & 3;
7543                 b_offset = offset + len - b_count;
7544                 ret = tg3_nvram_read(tp, b_offset, &val);
7545                 if (ret)
7546                         return ret;
7547                 val = cpu_to_le32(val);
7548                 memcpy(pd, ((char*)&val), b_count);
7549                 eeprom->len += b_count;
7550         }
7551         return 0;
7552 }
7553
7554 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7555
7556 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7557 {
7558         struct tg3 *tp = netdev_priv(dev);
7559         int ret;
7560         u32 offset, len, b_offset, odd_len, start, end;
7561         u8 *buf;
7562
7563         if (tp->link_config.phy_is_low_power)
7564                 return -EAGAIN;
7565
7566         if (eeprom->magic != TG3_EEPROM_MAGIC)
7567                 return -EINVAL;
7568
7569         offset = eeprom->offset;
7570         len = eeprom->len;
7571
7572         if ((b_offset = (offset & 3))) {
7573                 /* adjustments to start on required 4 byte boundary */
7574                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7575                 if (ret)
7576                         return ret;
7577                 start = cpu_to_le32(start);
7578                 len += b_offset;
7579                 offset &= ~3;
7580                 if (len < 4)
7581                         len = 4;
7582         }
7583
7584         odd_len = 0;
7585         if (len & 3) {
7586                 /* adjustments to end on required 4 byte boundary */
7587                 odd_len = 1;
7588                 len = (len + 3) & ~3;
7589                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7590                 if (ret)
7591                         return ret;
7592                 end = cpu_to_le32(end);
7593         }
7594
7595         buf = data;
7596         if (b_offset || odd_len) {
7597                 buf = kmalloc(len, GFP_KERNEL);
7598                 if (!buf)
7599                         return -ENOMEM;
7600                 if (b_offset)
7601                         memcpy(buf, &start, 4);
7602                 if (odd_len)
7603                         memcpy(buf+len-4, &end, 4);
7604                 memcpy(buf + b_offset, data, eeprom->len);
7605         }
7606
7607         ret = tg3_nvram_write_block(tp, offset, len, buf);
7608
7609         if (buf != data)
7610                 kfree(buf);
7611
7612         return ret;
7613 }
7614
7615 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7616 {
7617         struct tg3 *tp = netdev_priv(dev);
7618   
7619         cmd->supported = (SUPPORTED_Autoneg);
7620
7621         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7622                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7623                                    SUPPORTED_1000baseT_Full);
7624
7625         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7626                 cmd->supported |= (SUPPORTED_100baseT_Half |
7627                                   SUPPORTED_100baseT_Full |
7628                                   SUPPORTED_10baseT_Half |
7629                                   SUPPORTED_10baseT_Full |
7630                                   SUPPORTED_MII);
7631         else
7632                 cmd->supported |= SUPPORTED_FIBRE;
7633   
7634         cmd->advertising = tp->link_config.advertising;
7635         if (netif_running(dev)) {
7636                 cmd->speed = tp->link_config.active_speed;
7637                 cmd->duplex = tp->link_config.active_duplex;
7638         }
7639         cmd->port = 0;
7640         cmd->phy_address = PHY_ADDR;
7641         cmd->transceiver = 0;
7642         cmd->autoneg = tp->link_config.autoneg;
7643         cmd->maxtxpkt = 0;
7644         cmd->maxrxpkt = 0;
7645         return 0;
7646 }
7647   
7648 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7649 {
7650         struct tg3 *tp = netdev_priv(dev);
7651   
7652         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7653                 /* These are the only valid advertisement bits allowed.  */
7654                 if (cmd->autoneg == AUTONEG_ENABLE &&
7655                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7656                                           ADVERTISED_1000baseT_Full |
7657                                           ADVERTISED_Autoneg |
7658                                           ADVERTISED_FIBRE)))
7659                         return -EINVAL;
7660                 /* Fiber can only do SPEED_1000.  */
7661                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7662                          (cmd->speed != SPEED_1000))
7663                         return -EINVAL;
7664         /* Copper cannot force SPEED_1000.  */
7665         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7666                    (cmd->speed == SPEED_1000))
7667                 return -EINVAL;
7668         else if ((cmd->speed == SPEED_1000) &&
7669                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7670                 return -EINVAL;
7671
7672         tg3_full_lock(tp, 0);
7673
7674         tp->link_config.autoneg = cmd->autoneg;
7675         if (cmd->autoneg == AUTONEG_ENABLE) {
7676                 tp->link_config.advertising = cmd->advertising;
7677                 tp->link_config.speed = SPEED_INVALID;
7678                 tp->link_config.duplex = DUPLEX_INVALID;
7679         } else {
7680                 tp->link_config.advertising = 0;
7681                 tp->link_config.speed = cmd->speed;
7682                 tp->link_config.duplex = cmd->duplex;
7683         }
7684   
7685         if (netif_running(dev))
7686                 tg3_setup_phy(tp, 1);
7687
7688         tg3_full_unlock(tp);
7689   
7690         return 0;
7691 }
7692   
7693 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7694 {
7695         struct tg3 *tp = netdev_priv(dev);
7696   
7697         strcpy(info->driver, DRV_MODULE_NAME);
7698         strcpy(info->version, DRV_MODULE_VERSION);
7699         strcpy(info->fw_version, tp->fw_ver);
7700         strcpy(info->bus_info, pci_name(tp->pdev));
7701 }
7702   
7703 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7704 {
7705         struct tg3 *tp = netdev_priv(dev);
7706   
7707         wol->supported = WAKE_MAGIC;
7708         wol->wolopts = 0;
7709         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7710                 wol->wolopts = WAKE_MAGIC;
7711         memset(&wol->sopass, 0, sizeof(wol->sopass));
7712 }
7713   
7714 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7715 {
7716         struct tg3 *tp = netdev_priv(dev);
7717   
7718         if (wol->wolopts & ~WAKE_MAGIC)
7719                 return -EINVAL;
7720         if ((wol->wolopts & WAKE_MAGIC) &&
7721             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7722             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7723                 return -EINVAL;
7724   
7725         spin_lock_bh(&tp->lock);
7726         if (wol->wolopts & WAKE_MAGIC)
7727                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7728         else
7729                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7730         spin_unlock_bh(&tp->lock);
7731   
7732         return 0;
7733 }
7734   
7735 static u32 tg3_get_msglevel(struct net_device *dev)
7736 {
7737         struct tg3 *tp = netdev_priv(dev);
7738         return tp->msg_enable;
7739 }
7740   
7741 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7742 {
7743         struct tg3 *tp = netdev_priv(dev);
7744         tp->msg_enable = value;
7745 }
7746   
7747 #if TG3_TSO_SUPPORT != 0
7748 static int tg3_set_tso(struct net_device *dev, u32 value)
7749 {
7750         struct tg3 *tp = netdev_priv(dev);
7751
7752         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7753                 if (value)
7754                         return -EINVAL;
7755                 return 0;
7756         }
7757         return ethtool_op_set_tso(dev, value);
7758 }
7759 #endif
7760   
7761 static int tg3_nway_reset(struct net_device *dev)
7762 {
7763         struct tg3 *tp = netdev_priv(dev);
7764         u32 bmcr;
7765         int r;
7766   
7767         if (!netif_running(dev))
7768                 return -EAGAIN;
7769
7770         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7771                 return -EINVAL;
7772
7773         spin_lock_bh(&tp->lock);
7774         r = -EINVAL;
7775         tg3_readphy(tp, MII_BMCR, &bmcr);
7776         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7777             ((bmcr & BMCR_ANENABLE) ||
7778              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7779                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7780                                            BMCR_ANENABLE);
7781                 r = 0;
7782         }
7783         spin_unlock_bh(&tp->lock);
7784   
7785         return r;
7786 }
7787   
7788 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7789 {
7790         struct tg3 *tp = netdev_priv(dev);
7791   
7792         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7793         ering->rx_mini_max_pending = 0;
7794         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7795                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7796         else
7797                 ering->rx_jumbo_max_pending = 0;
7798
7799         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7800
7801         ering->rx_pending = tp->rx_pending;
7802         ering->rx_mini_pending = 0;
7803         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7804                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7805         else
7806                 ering->rx_jumbo_pending = 0;
7807
7808         ering->tx_pending = tp->tx_pending;
7809 }
7810   
7811 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7812 {
7813         struct tg3 *tp = netdev_priv(dev);
7814         int irq_sync = 0;
7815   
7816         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7817             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7818             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7819                 return -EINVAL;
7820   
7821         if (netif_running(dev)) {
7822                 tg3_netif_stop(tp);
7823                 irq_sync = 1;
7824         }
7825
7826         tg3_full_lock(tp, irq_sync);
7827   
7828         tp->rx_pending = ering->rx_pending;
7829
7830         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7831             tp->rx_pending > 63)
7832                 tp->rx_pending = 63;
7833         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7834         tp->tx_pending = ering->tx_pending;
7835
7836         if (netif_running(dev)) {
7837                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7838                 tg3_init_hw(tp);
7839                 tg3_netif_start(tp);
7840         }
7841
7842         tg3_full_unlock(tp);
7843   
7844         return 0;
7845 }
7846   
7847 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7848 {
7849         struct tg3 *tp = netdev_priv(dev);
7850   
7851         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7852         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7853         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7854 }
7855   
7856 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7857 {
7858         struct tg3 *tp = netdev_priv(dev);
7859         int irq_sync = 0;
7860   
7861         if (netif_running(dev)) {
7862                 tg3_netif_stop(tp);
7863                 irq_sync = 1;
7864         }
7865
7866         tg3_full_lock(tp, irq_sync);
7867
7868         if (epause->autoneg)
7869                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7870         else
7871                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7872         if (epause->rx_pause)
7873                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7874         else
7875                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7876         if (epause->tx_pause)
7877                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7878         else
7879                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7880
7881         if (netif_running(dev)) {
7882                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7883                 tg3_init_hw(tp);
7884                 tg3_netif_start(tp);
7885         }
7886
7887         tg3_full_unlock(tp);
7888   
7889         return 0;
7890 }
7891   
7892 static u32 tg3_get_rx_csum(struct net_device *dev)
7893 {
7894         struct tg3 *tp = netdev_priv(dev);
7895         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7896 }
7897   
7898 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7899 {
7900         struct tg3 *tp = netdev_priv(dev);
7901   
7902         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7903                 if (data != 0)
7904                         return -EINVAL;
7905                 return 0;
7906         }
7907   
7908         spin_lock_bh(&tp->lock);
7909         if (data)
7910                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7911         else
7912                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7913         spin_unlock_bh(&tp->lock);
7914   
7915         return 0;
7916 }
7917   
7918 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7919 {
7920         struct tg3 *tp = netdev_priv(dev);
7921   
7922         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7923                 if (data != 0)
7924                         return -EINVAL;
7925                 return 0;
7926         }
7927   
7928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7930                 ethtool_op_set_tx_hw_csum(dev, data);
7931         else
7932                 ethtool_op_set_tx_csum(dev, data);
7933
7934         return 0;
7935 }
7936
7937 static int tg3_get_stats_count (struct net_device *dev)
7938 {
7939         return TG3_NUM_STATS;
7940 }
7941
7942 static int tg3_get_test_count (struct net_device *dev)
7943 {
7944         return TG3_NUM_TEST;
7945 }
7946
7947 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7948 {
7949         switch (stringset) {
7950         case ETH_SS_STATS:
7951                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7952                 break;
7953         case ETH_SS_TEST:
7954                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7955                 break;
7956         default:
7957                 WARN_ON(1);     /* we need a WARN() */
7958                 break;
7959         }
7960 }
7961
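     /* ethtool LED identify: blink the port LEDs for 'data' seconds
      * (default 2), alternating every 500ms between an all-on pattern with
      * traffic blink and link-override only, then restore tp->led_ctrl.
      */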
7962 static int tg3_phys_id(struct net_device *dev, u32 data)
7963 {
7964         struct tg3 *tp = netdev_priv(dev);
7965         int i;
7966
7967         if (!netif_running(tp->dev))
7968                 return -EAGAIN;
7969
7970         if (data == 0)
7971                 data = 2;
7972
7973         for (i = 0; i < (data * 2); i++) {
7974                 if ((i % 2) == 0)
7975                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7976                                            LED_CTRL_1000MBPS_ON |
7977                                            LED_CTRL_100MBPS_ON |
7978                                            LED_CTRL_10MBPS_ON |
7979                                            LED_CTRL_TRAFFIC_OVERRIDE |
7980                                            LED_CTRL_TRAFFIC_BLINK |
7981                                            LED_CTRL_TRAFFIC_LED);
7983                 else
7984                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7985                                            LED_CTRL_TRAFFIC_OVERRIDE);
7986
7987                 if (msleep_interruptible(500))
7988                         break;
7989         }
7990         tw32(MAC_LED_CTRL, tp->led_ctrl);
7991         return 0;
7992 }
7993
7994 static void tg3_get_ethtool_stats (struct net_device *dev,
7995                                    struct ethtool_stats *estats, u64 *tmp_stats)
7996 {
7997         struct tg3 *tp = netdev_priv(dev);
7998         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7999 }
8000
8001 #define NVRAM_TEST_SIZE 0x100
8002 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8003
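     /* NVRAM self-test.  The first word identifies the image format: a
      * TG3_EEPROM_MAGIC signature marks a legacy image, which carries a CRC
      * of its first 0x10 bytes at offset 0x10 and a CRC of the manufacturing
      * block (starting at 0x74) at offset 0xfc.  A 0xa5 top byte marks a
      * selfboot image; only format-1 selfboot images are validated here,
      * with a simple 8-bit sum over the whole image.
      */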
8004 static int tg3_test_nvram(struct tg3 *tp)
8005 {
8006         u32 *buf, csum, magic;
8007         int i, j, err = 0, size;
8008
8009         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8010                 return -EIO;
8011
8012         if (magic == TG3_EEPROM_MAGIC)
8013                 size = NVRAM_TEST_SIZE;
8014         else if ((magic & 0xff000000) == 0xa5000000) {
8015                 if ((magic & 0xe00000) == 0x200000)
8016                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8017                 else
8018                         return 0;
8019         } else
8020                 return -EIO;
8021
8022         buf = kmalloc(size, GFP_KERNEL);
8023         if (buf == NULL)
8024                 return -ENOMEM;
8025
8026         err = -EIO;
8027         for (i = 0, j = 0; i < size; i += 4, j++) {
8028                 u32 val;
8029
8030                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8031                         break;
8032                 buf[j] = cpu_to_le32(val);
8033         }
8034         if (i < size)
8035                 goto out;
8036
8037         /* Selfboot format */
8038         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8039                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8040
8041                 for (i = 0; i < size; i++)
8042                         csum8 += buf8[i];
8043
8044                 /* Free the kmalloc()ed buffer via the common exit path. */
8045                 err = (csum8 == 0) ? 0 : -EIO;
8046                 goto out;
8047         }
8048
8049         /* Bootstrap checksum at offset 0x10 */
8050         csum = calc_crc((unsigned char *) buf, 0x10);
8051         if (csum != cpu_to_le32(buf[0x10/4]))
8052                 goto out;
8053
8054         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8055         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8056         if (csum != cpu_to_le32(buf[0xfc/4]))
8057                 goto out;
8058
8059         err = 0;
8060
8061 out:
8062         kfree(buf);
8063         return err;
8064 }
8065
8066 #define TG3_SERDES_TIMEOUT_SEC  2
8067 #define TG3_COPPER_TIMEOUT_SEC  6
8068
8069 static int tg3_test_link(struct tg3 *tp)
8070 {
8071         int i, max;
8072
8073         if (!netif_running(tp->dev))
8074                 return -ENODEV;
8075
8076         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8077                 max = TG3_SERDES_TIMEOUT_SEC;
8078         else
8079                 max = TG3_COPPER_TIMEOUT_SEC;
8080
8081         for (i = 0; i < max; i++) {
8082                 if (netif_carrier_ok(tp->dev))
8083                         return 0;
8084
8085                 if (msleep_interruptible(1000))
8086                         break;
8087         }
8088
8089         return -EIO;
8090 }
8091
8092 /* Only test the commonly used registers */
8093 static int tg3_test_registers(struct tg3 *tp)
8094 {
8095         int i, is_5705;
8096         u32 offset, read_mask, write_mask, val, save_val, read_val;
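             /* Each entry: register offset, chip-applicability flags, mask of
              * read-only bits (must survive writes unchanged) and mask of
              * read/write bits (must accept both all-zeros and all-ones).
              */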
8097         static struct {
8098                 u16 offset;
8099                 u16 flags;
8100 #define TG3_FL_5705     0x1
8101 #define TG3_FL_NOT_5705 0x2
8102 #define TG3_FL_NOT_5788 0x4
8103                 u32 read_mask;
8104                 u32 write_mask;
8105         } reg_tbl[] = {
8106                 /* MAC Control Registers */
8107                 { MAC_MODE, TG3_FL_NOT_5705,
8108                         0x00000000, 0x00ef6f8c },
8109                 { MAC_MODE, TG3_FL_5705,
8110                         0x00000000, 0x01ef6b8c },
8111                 { MAC_STATUS, TG3_FL_NOT_5705,
8112                         0x03800107, 0x00000000 },
8113                 { MAC_STATUS, TG3_FL_5705,
8114                         0x03800100, 0x00000000 },
8115                 { MAC_ADDR_0_HIGH, 0x0000,
8116                         0x00000000, 0x0000ffff },
8117                 { MAC_ADDR_0_LOW, 0x0000,
8118                         0x00000000, 0xffffffff },
8119                 { MAC_RX_MTU_SIZE, 0x0000,
8120                         0x00000000, 0x0000ffff },
8121                 { MAC_TX_MODE, 0x0000,
8122                         0x00000000, 0x00000070 },
8123                 { MAC_TX_LENGTHS, 0x0000,
8124                         0x00000000, 0x00003fff },
8125                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8126                         0x00000000, 0x000007fc },
8127                 { MAC_RX_MODE, TG3_FL_5705,
8128                         0x00000000, 0x000007dc },
8129                 { MAC_HASH_REG_0, 0x0000,
8130                         0x00000000, 0xffffffff },
8131                 { MAC_HASH_REG_1, 0x0000,
8132                         0x00000000, 0xffffffff },
8133                 { MAC_HASH_REG_2, 0x0000,
8134                         0x00000000, 0xffffffff },
8135                 { MAC_HASH_REG_3, 0x0000,
8136                         0x00000000, 0xffffffff },
8137
8138                 /* Receive Data and Receive BD Initiator Control Registers. */
8139                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8140                         0x00000000, 0xffffffff },
8141                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8142                         0x00000000, 0xffffffff },
8143                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8144                         0x00000000, 0x00000003 },
8145                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8146                         0x00000000, 0xffffffff },
8147                 { RCVDBDI_STD_BD+0, 0x0000,
8148                         0x00000000, 0xffffffff },
8149                 { RCVDBDI_STD_BD+4, 0x0000,
8150                         0x00000000, 0xffffffff },
8151                 { RCVDBDI_STD_BD+8, 0x0000,
8152                         0x00000000, 0xffff0002 },
8153                 { RCVDBDI_STD_BD+0xc, 0x0000,
8154                         0x00000000, 0xffffffff },
8155         
8156                 /* Receive BD Initiator Control Registers. */
8157                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8158                         0x00000000, 0xffffffff },
8159                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8160                         0x00000000, 0x000003ff },
8161                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8162                         0x00000000, 0xffffffff },
8163         
8164                 /* Host Coalescing Control Registers. */
8165                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8166                         0x00000000, 0x00000004 },
8167                 { HOSTCC_MODE, TG3_FL_5705,
8168                         0x00000000, 0x000000f6 },
8169                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8170                         0x00000000, 0xffffffff },
8171                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8172                         0x00000000, 0x000003ff },
8173                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8174                         0x00000000, 0xffffffff },
8175                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8176                         0x00000000, 0x000003ff },
8177                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8178                         0x00000000, 0xffffffff },
8179                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8180                         0x00000000, 0x000000ff },
8181                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8182                         0x00000000, 0xffffffff },
8183                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8184                         0x00000000, 0x000000ff },
8185                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8186                         0x00000000, 0xffffffff },
8187                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8188                         0x00000000, 0xffffffff },
8189                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8190                         0x00000000, 0xffffffff },
8191                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8192                         0x00000000, 0x000000ff },
8193                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8194                         0x00000000, 0xffffffff },
8195                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8196                         0x00000000, 0x000000ff },
8197                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8198                         0x00000000, 0xffffffff },
8199                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8200                         0x00000000, 0xffffffff },
8201                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8202                         0x00000000, 0xffffffff },
8203                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8204                         0x00000000, 0xffffffff },
8205                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8206                         0x00000000, 0xffffffff },
8207                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8208                         0xffffffff, 0x00000000 },
8209                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8210                         0xffffffff, 0x00000000 },
8211
8212                 /* Buffer Manager Control Registers. */
8213                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8214                         0x00000000, 0x007fff80 },
8215                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8216                         0x00000000, 0x007fffff },
8217                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8218                         0x00000000, 0x0000003f },
8219                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8220                         0x00000000, 0x000001ff },
8221                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8222                         0x00000000, 0x000001ff },
8223                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8224                         0xffffffff, 0x00000000 },
8225                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8226                         0xffffffff, 0x00000000 },
8227         
8228                 /* Mailbox Registers */
8229                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8230                         0x00000000, 0x000001ff },
8231                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8232                         0x00000000, 0x000001ff },
8233                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8234                         0x00000000, 0x000007ff },
8235                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8236                         0x00000000, 0x000001ff },
8237
8238                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8239         };
8240
8241         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8242                 is_5705 = 1;
8243         else
8244                 is_5705 = 0;
8245
8246         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8247                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8248                         continue;
8249
8250                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8251                         continue;
8252
8253                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8254                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8255                         continue;
8256
8257                 offset = (u32) reg_tbl[i].offset;
8258                 read_mask = reg_tbl[i].read_mask;
8259                 write_mask = reg_tbl[i].write_mask;
8260
8261                 /* Save the original register content */
8262                 save_val = tr32(offset);
8263
8264                 /* Determine the read-only value. */
8265                 read_val = save_val & read_mask;
8266
8267                 /* Write zero to the register, then make sure the read-only bits
8268                  * are not changed and the read/write bits are all zeros.
8269                  */
8270                 tw32(offset, 0);
8271
8272                 val = tr32(offset);
8273
8274                 /* Test the read-only and read/write bits. */
8275                 if (((val & read_mask) != read_val) || (val & write_mask))
8276                         goto out;
8277
8278                 /* Write ones to all the bits defined by RdMask and WrMask, then
8279                  * make sure the read-only bits are not changed and the
8280                  * read/write bits are all ones.
8281                  */
8282                 tw32(offset, read_mask | write_mask);
8283
8284                 val = tr32(offset);
8285
8286                 /* Test the read-only bits. */
8287                 if ((val & read_mask) != read_val)
8288                         goto out;
8289
8290                 /* Test the read/write bits. */
8291                 if ((val & write_mask) != write_mask)
8292                         goto out;
8293
8294                 tw32(offset, save_val);
8295         }
8296
8297         return 0;
8298
8299 out:
8300         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8301         tw32(offset, save_val);
8302         return -EIO;
8303 }
8304
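     /* Walk the given region of NIC-internal memory one 32-bit word at a
      * time: write each test pattern with tg3_write_mem(), read it back with
      * tg3_read_mem(), and fail on the first mismatch.
      */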
8305 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8306 {
8307         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8308         int i;
8309         u32 j;
8310
8311         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8312                 for (j = 0; j < len; j += 4) {
8313                         u32 val;
8314
8315                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8316                         tg3_read_mem(tp, offset + j, &val);
8317                         if (val != test_pattern[i])
8318                                 return -EIO;
8319                 }
8320         }
8321         return 0;
8322 }
8323
8324 static int tg3_test_memory(struct tg3 *tp)
8325 {
8326         static struct mem_entry {
8327                 u32 offset;
8328                 u32 len;
8329         } mem_tbl_570x[] = {
8330                 { 0x00000000, 0x00b50},
8331                 { 0x00002000, 0x1c000},
8332                 { 0xffffffff, 0x00000}
8333         }, mem_tbl_5705[] = {
8334                 { 0x00000100, 0x0000c},
8335                 { 0x00000200, 0x00008},
8336                 { 0x00004000, 0x00800},
8337                 { 0x00006000, 0x01000},
8338                 { 0x00008000, 0x02000},
8339                 { 0x00010000, 0x0e000},
8340                 { 0xffffffff, 0x00000}
8341         }, mem_tbl_5755[] = {
8342                 { 0x00000200, 0x00008},
8343                 { 0x00004000, 0x00800},
8344                 { 0x00006000, 0x00800},
8345                 { 0x00008000, 0x02000},
8346                 { 0x00010000, 0x0c000},
8347                 { 0xffffffff, 0x00000}
8348         };
8349         struct mem_entry *mem_tbl;
8350         int err = 0;
8351         int i;
8352
8353         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8354                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8355                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8356                         mem_tbl = mem_tbl_5755;
8357                 else
8358                         mem_tbl = mem_tbl_5705;
8359         } else
8360                 mem_tbl = mem_tbl_570x;
8361
8362         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8363                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8364                     mem_tbl[i].len)) != 0)
8365                         break;
8366         }
8367         
8368         return err;
8369 }
8370
8371 #define TG3_MAC_LOOPBACK        0
8372 #define TG3_PHY_LOOPBACK        1
8373
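     /* Single-packet loopback test.  Put either the MAC (internal GMII
      * loopback) or the PHY (BMCR loopback at 1000/full) into loopback mode,
      * build a 1514-byte frame addressed to our own MAC with a counting
      * payload, post it on the send ring, then poll the status block until
      * the TX consumer and RX producer indices advance and finally verify
      * the received payload byte for byte.
      */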
8374 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8375 {
8376         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8377         u32 desc_idx;
8378         struct sk_buff *skb, *rx_skb;
8379         u8 *tx_data;
8380         dma_addr_t map;
8381         int num_pkts, tx_len, rx_len, i, err;
8382         struct tg3_rx_buffer_desc *desc;
8383
8384         if (loopback_mode == TG3_MAC_LOOPBACK) {
8385                 /* HW errata - mac loopback fails in some cases on 5780.
8386                  * Normal traffic and PHY loopback are not affected by
8387                  * errata.
8388                  */
8389                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8390                         return 0;
8391
8392                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8393                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8394                            MAC_MODE_PORT_MODE_GMII;
8395                 tw32(MAC_MODE, mac_mode);
8396         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8397                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8398                                            BMCR_SPEED1000);
8399                 udelay(40);
8400                 /* reset to prevent losing 1st rx packet intermittently */
8401                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8402                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8403                         udelay(10);
8404                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8405                 }
8406                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8407                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8408                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8409                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8410                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8411                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8412                 }
8413                 tw32(MAC_MODE, mac_mode);
8414         }
8415         else
8416                 return -EINVAL;
8417
8418         err = -EIO;
8419
8420         tx_len = 1514;
8421         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8422         tx_data = skb_put(skb, tx_len);
8423         memcpy(tx_data, tp->dev->dev_addr, 6);
8424         memset(tx_data + 6, 0x0, 8);
8425
8426         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8427
8428         for (i = 14; i < tx_len; i++)
8429                 tx_data[i] = (u8) (i & 0xff);
8430
8431         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8432
8433         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8434              HOSTCC_MODE_NOW);
8435
8436         udelay(10);
8437
8438         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8439
8440         num_pkts = 0;
8441
8442         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8443
8444         tp->tx_prod++;
8445         num_pkts++;
8446
8447         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8448                      tp->tx_prod);
8449         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8450
8451         udelay(10);
8452
8453         for (i = 0; i < 10; i++) {
8454                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8455                        HOSTCC_MODE_NOW);
8456
8457                 udelay(10);
8458
8459                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8460                 rx_idx = tp->hw_status->idx[0].rx_producer;
8461                 if ((tx_idx == tp->tx_prod) &&
8462                     (rx_idx == (rx_start_idx + num_pkts)))
8463                         break;
8464         }
8465
8466         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8467         dev_kfree_skb(skb);
8468
8469         if (tx_idx != tp->tx_prod)
8470                 goto out;
8471
8472         if (rx_idx != rx_start_idx + num_pkts)
8473                 goto out;
8474
8475         desc = &tp->rx_rcb[rx_start_idx];
8476         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8477         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8478         if (opaque_key != RXD_OPAQUE_RING_STD)
8479                 goto out;
8480
8481         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8482             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8483                 goto out;
8484
8485         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8486         if (rx_len != tx_len)
8487                 goto out;
8488
8489         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8490
8491         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8492         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8493
8494         for (i = 14; i < tx_len; i++) {
8495                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8496                         goto out;
8497         }
8498         err = 0;
8499         
8500         /* tg3_free_rings will unmap and free the rx_skb */
8501 out:
8502         return err;
8503 }
8504
8505 #define TG3_MAC_LOOPBACK_FAILED         1
8506 #define TG3_PHY_LOOPBACK_FAILED         2
8507 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8508                                          TG3_PHY_LOOPBACK_FAILED)
8509
8510 static int tg3_test_loopback(struct tg3 *tp)
8511 {
8512         int err = 0;
8513
8514         if (!netif_running(tp->dev))
8515                 return TG3_LOOPBACK_FAILED;
8516
8517         tg3_reset_hw(tp);
8518
8519         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8520                 err |= TG3_MAC_LOOPBACK_FAILED;
8521         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8522                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8523                         err |= TG3_PHY_LOOPBACK_FAILED;
8524         }
8525
8526         return err;
8527 }
8528
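     /* ethtool self-test.  Result slots: data[0] NVRAM, data[1] link,
      * data[2] registers, data[3] memory, data[4] loopback (MAC/PHY failure
      * bits), data[5] interrupt.  The register, memory, loopback and
      * interrupt tests run offline only and require halting and
      * re-initializing the hardware.
      */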
8529 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8530                           u64 *data)
8531 {
8532         struct tg3 *tp = netdev_priv(dev);
8533
8534         if (tp->link_config.phy_is_low_power)
8535                 tg3_set_power_state(tp, PCI_D0);
8536
8537         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8538
8539         if (tg3_test_nvram(tp) != 0) {
8540                 etest->flags |= ETH_TEST_FL_FAILED;
8541                 data[0] = 1;
8542         }
8543         if (tg3_test_link(tp) != 0) {
8544                 etest->flags |= ETH_TEST_FL_FAILED;
8545                 data[1] = 1;
8546         }
8547         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8548                 int err, irq_sync = 0;
8549
8550                 if (netif_running(dev)) {
8551                         tg3_netif_stop(tp);
8552                         irq_sync = 1;
8553                 }
8554
8555                 tg3_full_lock(tp, irq_sync);
8556
8557                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8558                 err = tg3_nvram_lock(tp);
8559                 tg3_halt_cpu(tp, RX_CPU_BASE);
8560                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8561                         tg3_halt_cpu(tp, TX_CPU_BASE);
8562                 if (!err)
8563                         tg3_nvram_unlock(tp);
8564
8565                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8566                         tg3_phy_reset(tp);
8567
8568                 if (tg3_test_registers(tp) != 0) {
8569                         etest->flags |= ETH_TEST_FL_FAILED;
8570                         data[2] = 1;
8571                 }
8572                 if (tg3_test_memory(tp) != 0) {
8573                         etest->flags |= ETH_TEST_FL_FAILED;
8574                         data[3] = 1;
8575                 }
8576                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8577                         etest->flags |= ETH_TEST_FL_FAILED;
8578
8579                 tg3_full_unlock(tp);
8580
8581                 if (tg3_test_interrupt(tp) != 0) {
8582                         etest->flags |= ETH_TEST_FL_FAILED;
8583                         data[5] = 1;
8584                 }
8585
8586                 tg3_full_lock(tp, 0);
8587
8588                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8589                 if (netif_running(dev)) {
8590                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8591                         tg3_init_hw(tp);
8592                         tg3_netif_start(tp);
8593                 }
8594
8595                 tg3_full_unlock(tp);
8596         }
8597         if (tp->link_config.phy_is_low_power)
8598                 tg3_set_power_state(tp, PCI_D3hot);
8599
8600 }
8601
8602 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8603 {
8604         struct mii_ioctl_data *data = if_mii(ifr);
8605         struct tg3 *tp = netdev_priv(dev);
8606         int err;
8607
8608         switch(cmd) {
8609         case SIOCGMIIPHY:
8610                 data->phy_id = PHY_ADDR;
8611
8612                 /* fallthru */
8613         case SIOCGMIIREG: {
8614                 u32 mii_regval;
8615
8616                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8617                         break;                  /* We have no PHY */
8618
8619                 if (tp->link_config.phy_is_low_power)
8620                         return -EAGAIN;
8621
8622                 spin_lock_bh(&tp->lock);
8623                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8624                 spin_unlock_bh(&tp->lock);
8625
8626                 data->val_out = mii_regval;
8627
8628                 return err;
8629         }
8630
8631         case SIOCSMIIREG:
8632                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8633                         break;                  /* We have no PHY */
8634
8635                 if (!capable(CAP_NET_ADMIN))
8636                         return -EPERM;
8637
8638                 if (tp->link_config.phy_is_low_power)
8639                         return -EAGAIN;
8640
8641                 spin_lock_bh(&tp->lock);
8642                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8643                 spin_unlock_bh(&tp->lock);
8644
8645                 return err;
8646
8647         default:
8648                 /* do nothing */
8649                 break;
8650         }
8651         return -EOPNOTSUPP;
8652 }
8653
8654 #if TG3_VLAN_TAG_USED
8655 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8656 {
8657         struct tg3 *tp = netdev_priv(dev);
8658
8659         tg3_full_lock(tp, 0);
8660
8661         tp->vlgrp = grp;
8662
8663         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8664         __tg3_set_rx_mode(dev);
8665
8666         tg3_full_unlock(tp);
8667 }
8668
8669 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8670 {
8671         struct tg3 *tp = netdev_priv(dev);
8672
8673         tg3_full_lock(tp, 0);
8674         if (tp->vlgrp)
8675                 tp->vlgrp->vlan_devices[vid] = NULL;
8676         tg3_full_unlock(tp);
8677 }
8678 #endif
8679
8680 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8681 {
8682         struct tg3 *tp = netdev_priv(dev);
8683
8684         memcpy(ec, &tp->coal, sizeof(*ec));
8685         return 0;
8686 }
8687
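     /* Validate and apply ethtool coalescing parameters.  The *_irq and
      * stats-block settings exist only on pre-5705 hardware, so their
      * allowed maximums stay zero on 5705+ parts, and each of the RX and TX
      * pairs must keep at least one of {usecs, max frames} non-zero so that
      * interrupts can still be generated.
      */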
8688 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8689 {
8690         struct tg3 *tp = netdev_priv(dev);
8691         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8692         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8693
8694         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8695                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8696                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8697                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8698                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8699         }
8700
8701         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8702             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8703             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8704             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8705             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8706             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8707             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8708             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8709             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8710             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8711                 return -EINVAL;
8712
8713         /* No rx interrupts will be generated if both are zero */
8714         if ((ec->rx_coalesce_usecs == 0) &&
8715             (ec->rx_max_coalesced_frames == 0))
8716                 return -EINVAL;
8717
8718         /* No tx interrupts will be generated if both are zero */
8719         if ((ec->tx_coalesce_usecs == 0) &&
8720             (ec->tx_max_coalesced_frames == 0))
8721                 return -EINVAL;
8722
8723         /* Only copy relevant parameters, ignore all others. */
8724         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8725         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8726         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8727         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8728         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8729         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8730         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8731         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8732         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8733
8734         if (netif_running(dev)) {
8735                 tg3_full_lock(tp, 0);
8736                 __tg3_set_coalesce(tp, &tp->coal);
8737                 tg3_full_unlock(tp);
8738         }
8739         return 0;
8740 }
8741
8742 static struct ethtool_ops tg3_ethtool_ops = {
8743         .get_settings           = tg3_get_settings,
8744         .set_settings           = tg3_set_settings,
8745         .get_drvinfo            = tg3_get_drvinfo,
8746         .get_regs_len           = tg3_get_regs_len,
8747         .get_regs               = tg3_get_regs,
8748         .get_wol                = tg3_get_wol,
8749         .set_wol                = tg3_set_wol,
8750         .get_msglevel           = tg3_get_msglevel,
8751         .set_msglevel           = tg3_set_msglevel,
8752         .nway_reset             = tg3_nway_reset,
8753         .get_link               = ethtool_op_get_link,
8754         .get_eeprom_len         = tg3_get_eeprom_len,
8755         .get_eeprom             = tg3_get_eeprom,
8756         .set_eeprom             = tg3_set_eeprom,
8757         .get_ringparam          = tg3_get_ringparam,
8758         .set_ringparam          = tg3_set_ringparam,
8759         .get_pauseparam         = tg3_get_pauseparam,
8760         .set_pauseparam         = tg3_set_pauseparam,
8761         .get_rx_csum            = tg3_get_rx_csum,
8762         .set_rx_csum            = tg3_set_rx_csum,
8763         .get_tx_csum            = ethtool_op_get_tx_csum,
8764         .set_tx_csum            = tg3_set_tx_csum,
8765         .get_sg                 = ethtool_op_get_sg,
8766         .set_sg                 = ethtool_op_set_sg,
8767 #if TG3_TSO_SUPPORT != 0
8768         .get_tso                = ethtool_op_get_tso,
8769         .set_tso                = tg3_set_tso,
8770 #endif
8771         .self_test_count        = tg3_get_test_count,
8772         .self_test              = tg3_self_test,
8773         .get_strings            = tg3_get_strings,
8774         .phys_id                = tg3_phys_id,
8775         .get_stats_count        = tg3_get_stats_count,
8776         .get_ethtool_stats      = tg3_get_ethtool_stats,
8777         .get_coalesce           = tg3_get_coalesce,
8778         .set_coalesce           = tg3_set_coalesce,
8779         .get_perm_addr          = ethtool_op_get_perm_addr,
8780 };
8781
8782 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8783 {
8784         u32 cursize, val, magic;
8785
8786         tp->nvram_size = EEPROM_CHIP_SIZE;
8787
8788         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8789                 return;
8790
8791         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8792                 return;
8793
8794         /*
8795          * Size the chip by reading offsets at increasing powers of two.
8796          * When we encounter our validation signature, we know the addressing
8797          * has wrapped around, and thus have our chip size.
8798          */
8799         cursize = 0x10;
8800
8801         while (cursize < tp->nvram_size) {
8802                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8803                         return;
8804
8805                 if (val == magic)
8806                         break;
8807
8808                 cursize <<= 1;
8809         }
8810
8811         tp->nvram_size = cursize;
8812 }
8813                 
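     /* NVRAM size: legacy images store the size in KB in the upper 16 bits
      * of the word at offset 0xf0; selfboot images are sized by probing via
      * tg3_get_eeprom_size(), and 128KB (0x20000) is assumed if the size
      * word is zero or unreadable.
      */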
8814 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8815 {
8816         u32 val;
8817
8818         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8819                 return;
8820
8821         /* Selfboot format */
8822         if (val != TG3_EEPROM_MAGIC) {
8823                 tg3_get_eeprom_size(tp);
8824                 return;
8825         }
8826
8827         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8828                 if (val != 0) {
8829                         tp->nvram_size = (val >> 16) * 1024;
8830                         return;
8831                 }
8832         }
8833         tp->nvram_size = 0x20000;
8834 }
8835
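     /* Decode NVRAM_CFG1 to choose flash vs. EEPROM access and to map the
      * vendor field to a JEDEC id, page size and buffered/unbuffered mode.
      * The 5752, 5755 and 5787 have dedicated decoders below.
      */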
8836 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8837 {
8838         u32 nvcfg1;
8839
8840         nvcfg1 = tr32(NVRAM_CFG1);
8841         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8842                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8843         }
8844         else {
8845                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8846                 tw32(NVRAM_CFG1, nvcfg1);
8847         }
8848
8849         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8850             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8851                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8852                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8853                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8854                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8855                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8856                                 break;
8857                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8858                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8859                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8860                                 break;
8861                         case FLASH_VENDOR_ATMEL_EEPROM:
8862                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8863                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8864                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8865                                 break;
8866                         case FLASH_VENDOR_ST:
8867                                 tp->nvram_jedecnum = JEDEC_ST;
8868                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8869                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8870                                 break;
8871                         case FLASH_VENDOR_SAIFUN:
8872                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8873                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8874                                 break;
8875                         case FLASH_VENDOR_SST_SMALL:
8876                         case FLASH_VENDOR_SST_LARGE:
8877                                 tp->nvram_jedecnum = JEDEC_SST;
8878                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8879                                 break;
8880                 }
8881         }
8882         else {
8883                 tp->nvram_jedecnum = JEDEC_ATMEL;
8884                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8885                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8886         }
8887 }
8888
8889 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8890 {
8891         u32 nvcfg1;
8892
8893         nvcfg1 = tr32(NVRAM_CFG1);
8894
8895         /* NVRAM protection for TPM */
8896         if (nvcfg1 & (1 << 27))
8897                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8898
8899         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8900                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8901                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8902                         tp->nvram_jedecnum = JEDEC_ATMEL;
8903                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8904                         break;
8905                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8906                         tp->nvram_jedecnum = JEDEC_ATMEL;
8907                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8908                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8909                         break;
8910                 case FLASH_5752VENDOR_ST_M45PE10:
8911                 case FLASH_5752VENDOR_ST_M45PE20:
8912                 case FLASH_5752VENDOR_ST_M45PE40:
8913                         tp->nvram_jedecnum = JEDEC_ST;
8914                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8915                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8916                         break;
8917         }
8918
8919         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8920                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8921                         case FLASH_5752PAGE_SIZE_256:
8922                                 tp->nvram_pagesize = 256;
8923                                 break;
8924                         case FLASH_5752PAGE_SIZE_512:
8925                                 tp->nvram_pagesize = 512;
8926                                 break;
8927                         case FLASH_5752PAGE_SIZE_1K:
8928                                 tp->nvram_pagesize = 1024;
8929                                 break;
8930                         case FLASH_5752PAGE_SIZE_2K:
8931                                 tp->nvram_pagesize = 2048;
8932                                 break;
8933                         case FLASH_5752PAGE_SIZE_4K:
8934                                 tp->nvram_pagesize = 4096;
8935                                 break;
8936                         case FLASH_5752PAGE_SIZE_264:
8937                                 tp->nvram_pagesize = 264;
8938                                 break;
8939                 }
8940         }
8941         else {
8942                 /* For eeprom, set pagesize to maximum eeprom size */
8943                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8944
8945                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8946                 tw32(NVRAM_CFG1, nvcfg1);
8947         }
8948 }
8949
8950 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8951 {
8952         u32 nvcfg1;
8953
8954         nvcfg1 = tr32(NVRAM_CFG1);
8955
8956         /* NVRAM protection for TPM */
8957         if (nvcfg1 & (1 << 27))
8958                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8959
8960         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8961                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8962                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8963                         tp->nvram_jedecnum = JEDEC_ATMEL;
8964                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8965                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8966
8967                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8968                         tw32(NVRAM_CFG1, nvcfg1);
8969                         break;
8970                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8971                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8972                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8973                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8974                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8975                         tp->nvram_jedecnum = JEDEC_ATMEL;
8976                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8977                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8978                         tp->nvram_pagesize = 264;
8979                         break;
8980                 case FLASH_5752VENDOR_ST_M45PE10:
8981                 case FLASH_5752VENDOR_ST_M45PE20:
8982                 case FLASH_5752VENDOR_ST_M45PE40:
8983                         tp->nvram_jedecnum = JEDEC_ST;
8984                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8985                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8986                         tp->nvram_pagesize = 256;
8987                         break;
8988         }
8989 }
8990
8991 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8992 {
8993         u32 nvcfg1;
8994
8995         nvcfg1 = tr32(NVRAM_CFG1);
8996
8997         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8998                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8999                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9000                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9001                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9002                         tp->nvram_jedecnum = JEDEC_ATMEL;
9003                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9004                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9005
9006                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9007                         tw32(NVRAM_CFG1, nvcfg1);
9008                         break;
9009                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9010                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9011                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9012                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9013                         tp->nvram_jedecnum = JEDEC_ATMEL;
9014                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9015                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9016                         tp->nvram_pagesize = 264;
9017                         break;
9018                 case FLASH_5752VENDOR_ST_M45PE10:
9019                 case FLASH_5752VENDOR_ST_M45PE20:
9020                 case FLASH_5752VENDOR_ST_M45PE40:
9021                         tp->nvram_jedecnum = JEDEC_ST;
9022                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9023                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9024                         tp->nvram_pagesize = 256;
9025                         break;
9026         }
9027 }
9028
9029 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9030 static void __devinit tg3_nvram_init(struct tg3 *tp)
9031 {
9032         int j;
9033
9034         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9035                 return;
9036
9037         tw32_f(GRC_EEPROM_ADDR,
9038              (EEPROM_ADDR_FSM_RESET |
9039               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9040                EEPROM_ADDR_CLKPERD_SHIFT)));
9041
9042         /* XXX schedule_timeout() ... */
9043         for (j = 0; j < 100; j++)
9044                 udelay(10);
9045
9046         /* Enable seeprom accesses. */
9047         tw32_f(GRC_LOCAL_CTRL,
9048              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9049         udelay(100);
9050
9051         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9052             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9053                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9054
9055                 if (tg3_nvram_lock(tp)) {
9056                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9057                                "tg3_nvram_init failed.\n", tp->dev->name);
9058                         return;
9059                 }
9060                 tg3_enable_nvram_access(tp);
9061
9062                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9063                         tg3_get_5752_nvram_info(tp);
9064                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9065                         tg3_get_5755_nvram_info(tp);
9066                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9067                         tg3_get_5787_nvram_info(tp);
9068                 else
9069                         tg3_get_nvram_info(tp);
9070
9071                 tg3_get_nvram_size(tp);
9072
9073                 tg3_disable_nvram_access(tp);
9074                 tg3_nvram_unlock(tp);
9075
9076         } else {
9077                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9078
9079                 tg3_get_eeprom_size(tp);
9080         }
9081 }
9082
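     /* Legacy serial-EEPROM read path (used when TG3_FLAG_NVRAM is not
      * set): program the word address plus the READ and START bits into
      * GRC_EEPROM_ADDR, poll for EEPROM_ADDR_COMPLETE (roughly one second
      * at most), then fetch the word from GRC_EEPROM_DATA.
      */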
9083 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9084                                         u32 offset, u32 *val)
9085 {
9086         u32 tmp;
9087         int i;
9088
9089         if (offset > EEPROM_ADDR_ADDR_MASK ||
9090             (offset % 4) != 0)
9091                 return -EINVAL;
9092
9093         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9094                                         EEPROM_ADDR_DEVID_MASK |
9095                                         EEPROM_ADDR_READ);
9096         tw32(GRC_EEPROM_ADDR,
9097              tmp |
9098              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9099              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9100               EEPROM_ADDR_ADDR_MASK) |
9101              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9102
9103         for (i = 0; i < 10000; i++) {
9104                 tmp = tr32(GRC_EEPROM_ADDR);
9105
9106                 if (tmp & EEPROM_ADDR_COMPLETE)
9107                         break;
9108                 udelay(100);
9109         }
9110         if (!(tmp & EEPROM_ADDR_COMPLETE))
9111                 return -EBUSY;
9112
9113         *val = tr32(GRC_EEPROM_DATA);
9114         return 0;
9115 }
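/* Summary of the legacy (pre-NVRAM-interface) path above: the dword
 * address plus EEPROM_ADDR_READ | EEPROM_ADDR_START is programmed into
 * GRC_EEPROM_ADDR, EEPROM_ADDR_COMPLETE is polled for (up to 10000
 * iterations of 100 usec each), and the word is then fetched from
 * GRC_EEPROM_DATA.
 */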
9116
9117 #define NVRAM_CMD_TIMEOUT 10000
9118
9119 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9120 {
9121         int i;
9122
9123         tw32(NVRAM_CMD, nvram_cmd);
9124         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9125                 udelay(10);
9126                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9127                         udelay(10);
9128                         break;
9129                 }
9130         }
9131         if (i == NVRAM_CMD_TIMEOUT) {
9132                 return -EBUSY;
9133         }
9134         return 0;
9135 }
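/* Minimal usage sketch -- this mirrors what tg3_nvram_read() below does,
 * nothing new is introduced: a single 32-bit read programs the address
 * and then issues one FIRST+LAST command:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 *	if (ret == 0)
 *		val = swab32(tr32(NVRAM_RDDATA));
 */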
9136
9137 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9138 {
9139         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9140             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9141             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9142             (tp->nvram_jedecnum == JEDEC_ATMEL))
9143
9144                 addr = ((addr / tp->nvram_pagesize) <<
9145                         ATMEL_AT45DB0X1B_PAGE_POS) +
9146                        (addr % tp->nvram_pagesize);
9147
9148         return addr;
9149 }
9150
9151 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9152 {
9153         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9154             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9155             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9156             (tp->nvram_jedecnum == JEDEC_ATMEL))
9157
9158                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9159                         tp->nvram_pagesize) +
9160                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9161
9162         return addr;
9163 }
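/* Worked example of the two Atmel translations above, assuming a
 * 264-byte page and ATMEL_AT45DB0X1B_PAGE_POS == 9 (the byte-within-page
 * field needs 9 bits since 264 <= 512):
 *
 *	logical 0x215 (533):  page = 533 / 264 = 2, byte = 533 % 264 = 5
 *	physical           =  (2 << 9) + 5 = 0x405
 *
 * tg3_nvram_logical_addr() inverts this:
 *	(0x405 >> 9) * 264 + (0x405 & 0x1ff) = 2 * 264 + 5 = 533 = 0x215
 */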
9164
9165 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9166 {
9167         int ret;
9168
9169         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9170                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9171                 return -EINVAL;
9172         }
9173
9174         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9175                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9176
9177         offset = tg3_nvram_phys_addr(tp, offset);
9178
9179         if (offset > NVRAM_ADDR_MSK)
9180                 return -EINVAL;
9181
9182         ret = tg3_nvram_lock(tp);
9183         if (ret)
9184                 return ret;
9185
9186         tg3_enable_nvram_access(tp);
9187
9188         tw32(NVRAM_ADDR, offset);
9189         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9190                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9191
9192         if (ret == 0)
9193                 *val = swab32(tr32(NVRAM_RDDATA));
9194
9195         tg3_disable_nvram_access(tp);
9196
9197         tg3_nvram_unlock(tp);
9198
9199         return ret;
9200 }
9201
9202 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9203 {
9204         int err;
9205         u32 tmp;
9206
9207         err = tg3_nvram_read(tp, offset, &tmp);
9208         *val = swab32(tmp);
9209         return err;
9210 }
9211
9212 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9213                                     u32 offset, u32 len, u8 *buf)
9214 {
9215         int i, j, rc = 0;
9216         u32 val;
9217
9218         for (i = 0; i < len; i += 4) {
9219                 u32 addr, data;
9220
9221                 addr = offset + i;
9222
9223                 memcpy(&data, buf + i, 4);
9224
9225                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9226
9227                 val = tr32(GRC_EEPROM_ADDR);
9228                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9229
9230                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9231                         EEPROM_ADDR_READ);
9232                 tw32(GRC_EEPROM_ADDR, val |
9233                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9234                         (addr & EEPROM_ADDR_ADDR_MASK) |
9235                         EEPROM_ADDR_START |
9236                         EEPROM_ADDR_WRITE);
9237                 
9238                 for (j = 0; j < 10000; j++) {
9239                         val = tr32(GRC_EEPROM_ADDR);
9240
9241                         if (val & EEPROM_ADDR_COMPLETE)
9242                                 break;
9243                         udelay(100);
9244                 }
9245                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9246                         rc = -EBUSY;
9247                         break;
9248                 }
9249         }
9250
9251         return rc;
9252 }
9253
9254 /* offset and length are dword aligned */
9255 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9256                 u8 *buf)
9257 {
9258         int ret = 0;
9259         u32 pagesize = tp->nvram_pagesize;
9260         u32 pagemask = pagesize - 1;
9261         u32 nvram_cmd;
9262         u8 *tmp;
9263
9264         tmp = kmalloc(pagesize, GFP_KERNEL);
9265         if (tmp == NULL)
9266                 return -ENOMEM;
9267
9268         while (len) {
9269                 int j;
9270                 u32 phy_addr, page_off, size;
9271
9272                 phy_addr = offset & ~pagemask;
9273         
9274                 for (j = 0; j < pagesize; j += 4) {
9275                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9276                                                 (u32 *) (tmp + j))))
9277                                 break;
9278                 }
9279                 if (ret)
9280                         break;
9281
9282                 page_off = offset & pagemask;
9283                 size = pagesize;
9284                 if (len < size)
9285                         size = len;
9286
9287                 len -= size;
9288
9289                 memcpy(tmp + page_off, buf, size);
9290
9291                 offset = offset + (pagesize - page_off);
9292
9293                 tg3_enable_nvram_access(tp);
9294
9295                 /*
9296                  * Before we can erase the flash page, we need
9297                  * to issue a special "write enable" command.
9298                  */
9299                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9300
9301                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9302                         break;
9303
9304                 /* Erase the target page */
9305                 tw32(NVRAM_ADDR, phy_addr);
9306
9307                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9308                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9309
9310                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9311                         break;
9312
9313                 /* Issue another write enable to start the write. */
9314                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9315
9316                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9317                         break;
9318
9319                 for (j = 0; j < pagesize; j += 4) {
9320                         u32 data;
9321
9322                         data = *((u32 *) (tmp + j));
9323                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9324
9325                         tw32(NVRAM_ADDR, phy_addr + j);
9326
9327                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9328                                 NVRAM_CMD_WR;
9329
9330                         if (j == 0)
9331                                 nvram_cmd |= NVRAM_CMD_FIRST;
9332                         else if (j == (pagesize - 4))
9333                                 nvram_cmd |= NVRAM_CMD_LAST;
9334
9335                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9336                                 break;
9337                 }
9338                 if (ret)
9339                         break;
9340         }
9341
9342         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9343         tg3_nvram_exec_cmd(tp, nvram_cmd);
9344
9345         kfree(tmp);
9346
9347         return ret;
9348 }
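/* The unbuffered path above is a read-modify-write of whole flash pages:
 * read the page into a bounce buffer, merge the caller's data, then
 * WREN -> page erase -> WREN -> program the page one word at a time
 * (FIRST on the first word, LAST on the final one), and finally WRDI
 * once all pages have been written.
 */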
9349
9350 /* offset and length are dword aligned */
9351 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9352                 u8 *buf)
9353 {
9354         int i, ret = 0;
9355
9356         for (i = 0; i < len; i += 4, offset += 4) {
9357                 u32 data, page_off, phy_addr, nvram_cmd;
9358
9359                 memcpy(&data, buf + i, 4);
9360                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9361
9362                 page_off = offset % tp->nvram_pagesize;
9363
9364                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9365
9366                 tw32(NVRAM_ADDR, phy_addr);
9367
9368                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9369
9370                 if ((page_off == 0) || (i == 0))
9371                         nvram_cmd |= NVRAM_CMD_FIRST;
9372                 else if (page_off == (tp->nvram_pagesize - 4))
9373                         nvram_cmd |= NVRAM_CMD_LAST;
9374
9375                 if (i == (len - 4))
9376                         nvram_cmd |= NVRAM_CMD_LAST;
9377
9378                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9379                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9380                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9381                     (tp->nvram_jedecnum == JEDEC_ST) &&
9382                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9383
9384                         if ((ret = tg3_nvram_exec_cmd(tp,
9385                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9386                                 NVRAM_CMD_DONE)))
9387
9388                                 break;
9389                 }
9390                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9391                         /* We always do complete word writes to eeprom. */
9392                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9393                 }
9394
9395                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9396                         break;
9397         }
9398         return ret;
9399 }
9400
9401 /* offset and length are dword aligned */
9402 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9403 {
9404         int ret;
9405
9406         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9407                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9408                 return -EINVAL;
9409         }
9410
9411         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9412                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9413                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9414                 udelay(40);
9415         }
9416
9417         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9418                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9419         }
9420         else {
9421                 u32 grc_mode;
9422
9423                 ret = tg3_nvram_lock(tp);
9424                 if (ret)
9425                         return ret;
9426
9427                 tg3_enable_nvram_access(tp);
9428                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9429                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9430                         tw32(NVRAM_WRITE1, 0x406);
9431
9432                 grc_mode = tr32(GRC_MODE);
9433                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9434
9435                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9436                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9437
9438                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9439                                 buf);
9440                 }
9441                 else {
9442                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9443                                 buf);
9444                 }
9445
9446                 grc_mode = tr32(GRC_MODE);
9447                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9448
9449                 tg3_disable_nvram_access(tp);
9450                 tg3_nvram_unlock(tp);
9451         }
9452
9453         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9454                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9455                 udelay(40);
9456         }
9457
9458         return ret;
9459 }
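/* Minimal caller sketch (hypothetical scratch data and offset; offset and
 * length must be dword aligned, as the helpers above require):
 *
 *	u8 scratch[4] = { 0x12, 0x34, 0x56, 0x78 };
 *	int err = tg3_nvram_write_block(tp, 0x80, sizeof(scratch), scratch);
 *
 * The routine selects the EEPROM, buffered-flash or unbuffered-flash path
 * by itself from tp->tg3_flags, so callers never pick a path directly.
 */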
9460
9461 struct subsys_tbl_ent {
9462         u16 subsys_vendor, subsys_devid;
9463         u32 phy_id;
9464 };
9465
9466 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9467         /* Broadcom boards. */
9468         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9469         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9470         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9471         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9472         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9473         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9474         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9475         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9476         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9477         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9478         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9479
9480         /* 3com boards. */
9481         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9482         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9483         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9484         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9485         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9486
9487         /* DELL boards. */
9488         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9489         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9490         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9491         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9492
9493         /* Compaq boards. */
9494         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9495         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9496         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9497         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9498         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9499
9500         /* IBM boards. */
9501         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9502 };
9503
9504 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9505 {
9506         int i;
9507
9508         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9509                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9510                      tp->pdev->subsystem_vendor) &&
9511                     (subsys_id_to_phy_id[i].subsys_devid ==
9512                      tp->pdev->subsystem_device))
9513                         return &subsys_id_to_phy_id[i];
9514         }
9515         return NULL;
9516 }
9517
9518 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9519 {
9520         u32 val;
9521         u16 pmcsr;
9522
9523         /* On some early chips the SRAM cannot be accessed in D3hot state,
9524          * so we need to make sure we're in D0.
9525          */
9526         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9527         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9528         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9529         msleep(1);
9530
9531         /* Make sure register accesses (indirect or otherwise)
9532          * will function correctly.
9533          */
9534         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9535                                tp->misc_host_ctrl);
9536
9537         tp->phy_id = PHY_ID_INVALID;
9538         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9539
9540         /* Do not even try poking around in here on Sun parts.  */
9541         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9542                 return;
9543
9544         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9545         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9546                 u32 nic_cfg, led_cfg;
9547                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9548                 int eeprom_phy_serdes = 0;
9549
9550                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9551                 tp->nic_sram_data_cfg = nic_cfg;
9552
9553                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9554                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9555                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9556                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9557                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9558                     (ver > 0) && (ver < 0x100))
9559                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9560
9561                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9562                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9563                         eeprom_phy_serdes = 1;
9564
9565                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9566                 if (nic_phy_id != 0) {
9567                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9568                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9569
9570                         eeprom_phy_id  = (id1 >> 16) << 10;
9571                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9572                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9573                 } else
9574                         eeprom_phy_id = 0;
9575
9576                 tp->phy_id = eeprom_phy_id;
9577                 if (eeprom_phy_serdes) {
9578                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9579                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9580                         else
9581                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9582                 }
9583
9584                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9585                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9586                                     SHASTA_EXT_LED_MODE_MASK);
9587                 else
9588                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9589
9590                 switch (led_cfg) {
9591                 default:
9592                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9593                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9594                         break;
9595
9596                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9597                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9598                         break;
9599
9600                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9601                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9602
9603                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9604                          * read from some older 5700/5701 bootcode.
9605                          */
9606                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9607                             ASIC_REV_5700 ||
9608                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9609                             ASIC_REV_5701)
9610                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9611
9612                         break;
9613
9614                 case SHASTA_EXT_LED_SHARED:
9615                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9616                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9617                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9618                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9619                                                  LED_CTRL_MODE_PHY_2);
9620                         break;
9621
9622                 case SHASTA_EXT_LED_MAC:
9623                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9624                         break;
9625
9626                 case SHASTA_EXT_LED_COMBO:
9627                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9628                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9629                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9630                                                  LED_CTRL_MODE_PHY_2);
9631                         break;
9632
9633                 }
9634
9635                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9636                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9637                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9638                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9639
9640                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9641                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9642                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9643                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9644
9645                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9646                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9647                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9648                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9649                 }
9650                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9651                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9652
9653                 if (cfg2 & (1 << 17))
9654                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9655
9656                 /* serdes signal pre-emphasis in register 0x590 is set by
9657                  * the bootcode if bit 18 is set */
9658                 if (cfg2 & (1 << 18))
9659                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9660         }
9661 }
9662
9663 static int __devinit tg3_phy_probe(struct tg3 *tp)
9664 {
9665         u32 hw_phy_id_1, hw_phy_id_2;
9666         u32 hw_phy_id, hw_phy_id_masked;
9667         int err;
9668
9669         /* Reading the PHY ID register can conflict with ASF
9670          * firmware access to the PHY hardware.
9671          */
9672         err = 0;
9673         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9674                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9675         } else {
9676                 /* Now read the physical PHY_ID from the chip and verify
9677                  * that it is sane.  If it doesn't look good, we fall back
9678                  * to the PHY_ID set up from the eeprom area and, failing
9679                  * that, to the hard-coded subsystem-ID table.
9680                  */
9681                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9682                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9683
9684                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9685                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9686                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9687
9688                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9689         }
9690
9691         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9692                 tp->phy_id = hw_phy_id;
9693                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9694                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9695                 else
9696                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9697         } else {
9698                 if (tp->phy_id != PHY_ID_INVALID) {
9699                         /* Do nothing, phy ID already set up in
9700                          * tg3_get_eeprom_hw_cfg().
9701                          */
9702                 } else {
9703                         struct subsys_tbl_ent *p;
9704
9705                         /* No eeprom signature?  Try the hardcoded
9706                          * subsys device table.
9707                          */
9708                         p = lookup_by_subsys(tp);
9709                         if (!p)
9710                                 return -ENODEV;
9711
9712                         tp->phy_id = p->phy_id;
9713                         if (!tp->phy_id ||
9714                             tp->phy_id == PHY_ID_BCM8002)
9715                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9716                 }
9717         }
9718
9719         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9720             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9721                 u32 bmsr, adv_reg, tg3_ctrl;
9722
9723                 tg3_readphy(tp, MII_BMSR, &bmsr);
9724                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9725                     (bmsr & BMSR_LSTATUS))
9726                         goto skip_phy_reset;
9727                     
9728                 err = tg3_phy_reset(tp);
9729                 if (err)
9730                         return err;
9731
9732                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9733                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9734                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9735                 tg3_ctrl = 0;
9736                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9737                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9738                                     MII_TG3_CTRL_ADV_1000_FULL);
9739                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9740                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9741                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9742                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9743                 }
9744
9745                 if (!tg3_copper_is_advertising_all(tp)) {
9746                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9747
9748                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9749                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9750
9751                         tg3_writephy(tp, MII_BMCR,
9752                                      BMCR_ANENABLE | BMCR_ANRESTART);
9753                 }
9754                 tg3_phy_set_wirespeed(tp);
9755
9756                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9757                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9758                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9759         }
9760
9761 skip_phy_reset:
9762         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9763                 err = tg3_init_5401phy_dsp(tp);
9764                 if (err)
9765                         return err;
9766         }
9767
9768         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9769                 err = tg3_init_5401phy_dsp(tp);
9770         }
9771
9772         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9773                 tp->link_config.advertising =
9774                         (ADVERTISED_1000baseT_Half |
9775                          ADVERTISED_1000baseT_Full |
9776                          ADVERTISED_Autoneg |
9777                          ADVERTISED_FIBRE);
9778         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9779                 tp->link_config.advertising &=
9780                         ~(ADVERTISED_1000baseT_Half |
9781                           ADVERTISED_1000baseT_Full);
9782
9783         return err;
9784 }
9785
9786 static void __devinit tg3_read_partno(struct tg3 *tp)
9787 {
9788         unsigned char vpd_data[256];
9789         int i;
9790         u32 magic;
9791
9792         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9793                 /* Sun decided not to put the necessary bits in the
9794                  * NVRAM of their onboard tg3 parts :(
9795                  */
9796                 strcpy(tp->board_part_number, "Sun 570X");
9797                 return;
9798         }
9799
9800         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9801                 return;
9802
9803         if (magic == TG3_EEPROM_MAGIC) {
9804                 for (i = 0; i < 256; i += 4) {
9805                         u32 tmp;
9806
9807                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9808                                 goto out_not_found;
9809
9810                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9811                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9812                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9813                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9814                 }
9815         } else {
9816                 int vpd_cap;
9817
9818                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9819                 for (i = 0; i < 256; i += 4) {
9820                         u32 tmp, j = 0;
9821                         u16 tmp16;
9822
9823                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9824                                               i);
9825                         while (j++ < 100) {
9826                                 pci_read_config_word(tp->pdev, vpd_cap +
9827                                                      PCI_VPD_ADDR, &tmp16);
9828                                 if (tmp16 & 0x8000)
9829                                         break;
9830                                 msleep(1);
9831                         }
9832                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9833                                               &tmp);
9834                         tmp = cpu_to_le32(tmp);
9835                         memcpy(&vpd_data[i], &tmp, 4);
9836                 }
9837         }
9838
9839         /* Now parse and find the part number. */
9840         for (i = 0; i < 256; ) {
9841                 unsigned char val = vpd_data[i];
9842                 int block_end;
9843
9844                 if (val == 0x82 || val == 0x91) {
9845                         i = (i + 3 +
9846                              (vpd_data[i + 1] +
9847                               (vpd_data[i + 2] << 8)));
9848                         continue;
9849                 }
9850
9851                 if (val != 0x90)
9852                         goto out_not_found;
9853
9854                 block_end = (i + 3 +
9855                              (vpd_data[i + 1] +
9856                               (vpd_data[i + 2] << 8)));
9857                 i += 3;
9858                 while (i < block_end) {
9859                         if (vpd_data[i + 0] == 'P' &&
9860                             vpd_data[i + 1] == 'N') {
9861                                 int partno_len = vpd_data[i + 2];
9862
9863                                 if (partno_len > 24)
9864                                         goto out_not_found;
9865
9866                                 memcpy(tp->board_part_number,
9867                                        &vpd_data[i + 3],
9868                                        partno_len);
9869
9870                                 /* Success. */
9871                                 return;
9872                         }
                              /* Not "PN": skip this keyword entry (2-byte
                               * keyword, 1-byte length, then the data) so
                               * the scan cannot loop forever on an
                               * unrecognized keyword.
                               */
                              i += 3 + vpd_data[i + 2];
9873                 }
9874
9875                 /* Part number not found. */
9876                 goto out_not_found;
9877         }
9878
9879 out_not_found:
9880         strcpy(tp->board_part_number, "none");
9881 }
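/* For reference, the PCI VPD layout walked above: each large resource
 * starts with a tag byte (0x82 identifier string, 0x90 read-only data,
 * 0x91 read/write data) followed by a 16-bit little-endian length.
 * Within the 0x90 resource, each keyword entry is two ASCII characters
 * ("PN" is the board part number) plus a one-byte length and the data.
 */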
9882
9883 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9884 {
9885         u32 val, offset, start;
9886
9887         if (tg3_nvram_read_swab(tp, 0, &val))
9888                 return;
9889
9890         if (val != TG3_EEPROM_MAGIC)
9891                 return;
9892
9893         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9894             tg3_nvram_read_swab(tp, 0x4, &start))
9895                 return;
9896
9897         offset = tg3_nvram_logical_addr(tp, offset);
9898         if (tg3_nvram_read_swab(tp, offset, &val))
9899                 return;
9900
9901         if ((val & 0xfc000000) == 0x0c000000) {
9902                 u32 ver_offset, addr;
9903                 int i;
9904
9905                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9906                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9907                         return;
9908
9909                 if (val != 0)
9910                         return;
9911
9912                 addr = offset + ver_offset - start;
9913                 for (i = 0; i < 16; i += 4) {
9914                         if (tg3_nvram_read(tp, addr + i, &val))
9915                                 return;
9916
9917                         val = cpu_to_le32(val);
9918                         memcpy(tp->fw_ver + i, &val, 4);
9919                 }
9920         }
9921 }
9922
9923 #ifdef CONFIG_SPARC64
9924 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9925 {
9926         struct pci_dev *pdev = tp->pdev;
9927         struct pcidev_cookie *pcp = pdev->sysdata;
9928
9929         if (pcp != NULL) {
9930                 int node = pcp->prom_node;
9931                 u32 venid;
9932                 int err;
9933
9934                 err = prom_getproperty(node, "subsystem-vendor-id",
9935                                        (char *) &venid, sizeof(venid));
9936                 if (err == 0 || err == -1)
9937                         return 0;
9938                 if (venid == PCI_VENDOR_ID_SUN)
9939                         return 1;
9940
9941                 /* TG3 chips onboard the SunBlade-2500 don't have the
9942                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9943                  * are distinguishable from non-Sun variants by being
9944                  * named "network" by the firmware.  Non-Sun cards will
9945                  * show up as being named "ethernet".
9946                  */
9947                 if (!strcmp(pcp->prom_name, "network"))
9948                         return 1;
9949         }
9950         return 0;
9951 }
9952 #endif
9953
9954 static int __devinit tg3_get_invariants(struct tg3 *tp)
9955 {
9956         static struct pci_device_id write_reorder_chipsets[] = {
9957                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9958                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9959                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9960                              PCI_DEVICE_ID_VIA_8385_0) },
9961                 { },
9962         };
9963         u32 misc_ctrl_reg;
9964         u32 cacheline_sz_reg;
9965         u32 pci_state_reg, grc_misc_cfg;
9966         u32 val;
9967         u16 pci_cmd;
9968         int err;
9969
9970 #ifdef CONFIG_SPARC64
9971         if (tg3_is_sun_570X(tp))
9972                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9973 #endif
9974
9975         /* Force memory write invalidate off.  If we leave it on,
9976          * then on 5700_BX chips we have to enable a workaround.
9977          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9978          * to match the cacheline size.  The Broadcom driver has this
9979          * workaround but turns MWI off all the time, so it never uses
9980          * it.  This seems to suggest that the workaround is insufficient.
9981          */
9982         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9983         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9984         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9985
9986         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9987          * has the register indirect write enable bit set before
9988          * we try to access any of the MMIO registers.  It is also
9989          * critical that the PCI-X hw workaround situation is decided
9990          * before that.
9991          */
9992         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9993                               &misc_ctrl_reg);
9994
9995         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9996                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9997
9998         /* Wrong chip ID in 5752 A0. This code can be removed later
9999          * as A0 is not in production.
10000          */
10001         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10002                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10003
10004         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10005          * we need to disable memory and use config. cycles
10006          * only to access all registers. The 5702/03 chips
10007          * can mistakenly decode the special cycles from the
10008          * ICH chipsets as memory write cycles, causing corruption
10009          * of register and memory space. Only certain ICH bridges
10010          * will drive special cycles with non-zero data during the
10011          * address phase which can fall within the 5703's address
10012          * range. This is not an ICH bug as the PCI spec allows
10013          * non-zero address during special cycles. However, only
10014          * these ICH bridges are known to drive non-zero addresses
10015          * during special cycles.
10016          *
10017          * Since special cycles do not cross PCI bridges, we only
10018          * enable this workaround if the 5703 is on the secondary
10019          * bus of these ICH bridges.
10020          */
10021         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10022             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10023                 static struct tg3_dev_id {
10024                         u32     vendor;
10025                         u32     device;
10026                         u32     rev;
10027                 } ich_chipsets[] = {
10028                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10029                           PCI_ANY_ID },
10030                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10031                           PCI_ANY_ID },
10032                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10033                           0xa },
10034                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10035                           PCI_ANY_ID },
10036                         { },
10037                 };
10038                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10039                 struct pci_dev *bridge = NULL;
10040
10041                 while (pci_id->vendor != 0) {
10042                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10043                                                 bridge);
10044                         if (!bridge) {
10045                                 pci_id++;
10046                                 continue;
10047                         }
10048                         if (pci_id->rev != PCI_ANY_ID) {
10049                                 u8 rev;
10050
10051                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10052                                                      &rev);
10053                                 if (rev > pci_id->rev)
10054                                         continue;
10055                         }
10056                         if (bridge->subordinate &&
10057                             (bridge->subordinate->number ==
10058                              tp->pdev->bus->number)) {
10059
10060                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10061                                 pci_dev_put(bridge);
10062                                 break;
10063                         }
10064                 }
10065         }
10066
10067         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10068          * DMA addresses > 40-bit.  This bridge may have additional
10069          * 57xx devices behind it, for example in some 4-port NIC designs.
10070          * Any tg3 device found behind the bridge will also need the 40-bit
10071          * DMA workaround.
10072          */
10073         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10074             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10075                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10076                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10077                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10078         }
10079         else {
10080                 struct pci_dev *bridge = NULL;
10081
10082                 do {
10083                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10084                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10085                                                 bridge);
10086                         if (bridge && bridge->subordinate &&
10087                             (bridge->subordinate->number <=
10088                              tp->pdev->bus->number) &&
10089                             (bridge->subordinate->subordinate >=
10090                              tp->pdev->bus->number)) {
10091                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10092                                 pci_dev_put(bridge);
10093                                 break;
10094                         }
10095                 } while (bridge);
10096         }
10097
10098         /* Initialize misc host control in PCI block. */
10099         tp->misc_host_ctrl |= (misc_ctrl_reg &
10100                                MISC_HOST_CTRL_CHIPREV);
10101         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10102                                tp->misc_host_ctrl);
10103
10104         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10105                               &cacheline_sz_reg);
10106
10107         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10108         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10109         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10110         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10111
10112         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10113             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10114             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10115             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10116             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10117                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10118
10119         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10120             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10121                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10122
10123         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10124                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10125                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10126                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10127                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10128                 } else
10129                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10130         }
10131
10132         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10133             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10134             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10135             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10137                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10138
10139         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10140                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10141
10142         /* If we have an AMD 762 or VIA K8T800 chipset, write
10143          * reordering to the mailbox registers done by the host
10144          * controller can cause major troubles.  We read back from
10145          * every mailbox register write to force the writes to be
10146          * posted to the chip in order.
10147          */
10148         if (pci_dev_present(write_reorder_chipsets) &&
10149             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10150                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
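        /* The flag selects the flush-on-write mailbox accessors assigned
         * further down.  The idea is simply (a sketch of the pattern, not
         * necessarily the exact helper body used elsewhere in this file):
         *
         *	writel(val, tp->regs + off);
         *	readl(tp->regs + off);	read back to force the posted
         *				write out in order
         */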
10151
10152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10153             tp->pci_lat_timer < 64) {
10154                 tp->pci_lat_timer = 64;
10155
10156                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10157                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10158                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10159                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10160
10161                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10162                                        cacheline_sz_reg);
10163         }
10164
10165         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10166                               &pci_state_reg);
10167
10168         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10169                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10170
10171                 /* If this is a 5700 BX chipset, and we are in PCI-X
10172                  * mode, enable register write workaround.
10173                  *
10174                  * The workaround is to use indirect register accesses
10175                  * for all chip writes not to mailbox registers.
10176                  */
10177                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10178                         u32 pm_reg;
10179                         u16 pci_cmd;
10180
10181                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10182
10183                         /* The chip can have its power management PCI config
10184                          * space registers clobbered due to this bug.
10185                          * So explicitly force the chip into D0 here.
10186                          */
10187                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10188                                               &pm_reg);
10189                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10190                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10191                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10192                                                pm_reg);
10193
10194                         /* Also, force SERR#/PERR# in PCI command. */
10195                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10196                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10197                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10198                 }
10199         }
10200
10201         /* 5700 BX chips need to have their TX producer index mailboxes
10202          * written twice to work around a bug.
10203          */
10204         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10205                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10206
10207         /* Back to back register writes can cause problems on this chip;
10208          * the workaround is to read back all reg writes except those to
10209          * mailbox regs.  See tg3_write_indirect_reg32().
10210          *
10211          * PCI Express 5750_A0 rev chips need this workaround too.
10212          */
10213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10214             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10215              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10216                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10217
10218         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10219                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10220         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10221                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10222
10223         /* Chip-specific fixup from Broadcom driver */
10224         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10225             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10226                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10227                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10228         }
10229
10230         /* Default fast path register access methods */
10231         tp->read32 = tg3_read32;
10232         tp->write32 = tg3_write32;
10233         tp->read32_mbox = tg3_read32;
10234         tp->write32_mbox = tg3_write32;
10235         tp->write32_tx_mbox = tg3_write32;
10236         tp->write32_rx_mbox = tg3_write32;
10237
10238         /* Various workaround register access methods */
10239         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10240                 tp->write32 = tg3_write_indirect_reg32;
10241         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10242                 tp->write32 = tg3_write_flush_reg32;
10243
10244         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10245             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10246                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10247                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10248                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10249         }
10250
10251         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10252                 tp->read32 = tg3_read_indirect_reg32;
10253                 tp->write32 = tg3_write_indirect_reg32;
10254                 tp->read32_mbox = tg3_read_indirect_mbox;
10255                 tp->write32_mbox = tg3_write_indirect_mbox;
10256                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10257                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10258
10259                 iounmap(tp->regs);
10260                 tp->regs = NULL;
10261
10262                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10263                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10264                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10265         }
10266
10267         /* Get eeprom hw config before calling tg3_set_power_state().
10268          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10269          * determined before calling tg3_set_power_state() so that
10270          * we know whether or not to switch out of Vaux power.
10271          * When the flag is set, it means that GPIO1 is used for eeprom
10272          * write protect and also implies that it is a LOM where GPIOs
10273          * are not used to switch power.
10274          */ 
10275         tg3_get_eeprom_hw_cfg(tp);
10276
10277         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10278          * GPIO1 driven high will bring 5700's external PHY out of reset.
10279          * It is also used as eeprom write protect on LOMs.
10280          */
10281         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10282         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10283             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10284                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10285                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10286         /* Unused GPIO3 must be driven as output on 5752 because there
10287          * are no pull-up resistors on unused GPIO pins.
10288          */
10289         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10290                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10291
10292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10293                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10294
10295         /* Force the chip into D0. */
10296         err = tg3_set_power_state(tp, PCI_D0);
10297         if (err) {
10298                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10299                        pci_name(tp->pdev));
10300                 return err;
10301         }
10302
10303         /* 5700 B0 chips do not support checksumming correctly due
10304          * to hardware bugs.
10305          */
10306         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10307                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10308
10309         /* Pseudo-header checksum is done by hardware logic and not
10310          * the offload processors, so make the chip do the pseudo-
10311          * header checksums on receive.  For transmit it is more
10312          * convenient to do the pseudo-header checksum in software
10313          * as Linux does that on transmit for us in all cases.
10314          */
10315         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10316         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10317
10318         /* Derive initial jumbo mode from MTU assigned in
10319          * ether_setup() via the alloc_etherdev() call
10320          */
10321         if (tp->dev->mtu > ETH_DATA_LEN &&
10322             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10323                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10324
10325         /* Determine WakeOnLan speed to use. */
10326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10327             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10328             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10329             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10330                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10331         } else {
10332                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10333         }
10334
10335         /* A few boards don't want the Ethernet@WireSpeed phy feature */
10336         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10337             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10338              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10339              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10340             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10341                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10342
10343         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10344             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10345                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10346         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10347                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10348
10349         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10350             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10351             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10352                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10353
10354         tp->coalesce_mode = 0;
10355         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10356             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10357                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10358
10359         /* Initialize MAC MI mode, polling disabled. */
10360         tw32_f(MAC_MI_MODE, tp->mi_mode);
10361         udelay(80);
10362
10363         /* Initialize data/descriptor byte/word swapping. */
10364         val = tr32(GRC_MODE);
10365         val &= GRC_MODE_HOST_STACKUP;
10366         tw32(GRC_MODE, val | tp->grc_mode);
10367
10368         tg3_switch_clocks(tp);
10369
10370         /* Clear this out for sanity. */
10371         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10372
10373         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10374                               &pci_state_reg);
10375         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10376             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10377                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10378
10379                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10380                     chiprevid == CHIPREV_ID_5701_B0 ||
10381                     chiprevid == CHIPREV_ID_5701_B2 ||
10382                     chiprevid == CHIPREV_ID_5701_B5) {
10383                         void __iomem *sram_base;
10384
10385                         /* Write some dummy words into the SRAM status block
10386                          * area, see if it reads back correctly.  If the return
10387                          * value is bad, force enable the PCIX workaround.
10388                          */
10389                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10390
10391                         writel(0x00000000, sram_base);
10392                         writel(0x00000000, sram_base + 4);
10393                         writel(0xffffffff, sram_base + 4);
10394                         if (readl(sram_base) != 0x00000000)
10395                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10396                 }
10397         }
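              /* If sram_base no longer reads back as zero, one of the writes
               * aimed at sram_base + 4 evidently landed at sram_base instead,
               * which is why the PCIX_TARGET_HWBUG workaround is forced on
               * above.
               */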
10398
10399         udelay(50);
10400         tg3_nvram_init(tp);
10401
10402         grc_misc_cfg = tr32(GRC_MISC_CFG);
10403         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10404
10405         /* Broadcom's driver says that CIOBE multisplit has a bug */
10406 #if 0
10407         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10408             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10409                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10410                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10411         }
10412 #endif
10413         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10414             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10415              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10416                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10417
10418         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10419             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10420                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10421         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10422                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10423                                       HOSTCC_MODE_CLRTICK_TXBD);
10424
10425                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10426                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10427                                        tp->misc_host_ctrl);
10428         }
10429
10430         /* these are limited to 10/100 only */
10431         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10432              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10433             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10434              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10435              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10436               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10437               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10438             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10439              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10440               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10441                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10442
10443         err = tg3_phy_probe(tp);
10444         if (err) {
10445                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10446                        pci_name(tp->pdev), err);
10447                 /* ... but do not return immediately ... */
10448         }
10449
10450         tg3_read_partno(tp);
10451         tg3_read_fw_ver(tp);
10452
10453         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10454                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10455         } else {
10456                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10457                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10458                 else
10459                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10460         }
10461
10462         /* 5700 {AX,BX} chips have a broken status block link
10463          * change bit implementation, so we must use the
10464          * status register in those cases.
10465          */
10466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10467                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10468         else
10469                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10470
10471         /* The led_ctrl is set during tg3_phy_probe, here we might
10472          * have to force the link status polling mechanism based
10473          * upon subsystem IDs.
10474          */
10475         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10476             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10477                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10478                                   TG3_FLAG_USE_LINKCHG_REG);
10479         }
10480
10481         /* For all SERDES we poll the MAC status register. */
10482         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10483                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10484         else
10485                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10486
10487         /* All chips before the 5755 and 5787 can get confused if TX buffers
10488          * straddle the 4GB address boundary in some cases.
10489          */
10490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10492                 tp->dev->hard_start_xmit = tg3_start_xmit;
10493         else
10494                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10495
10496         tp->rx_offset = 2;
10497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10498             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10499                 tp->rx_offset = 0;
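              /* An rx_offset of 2 keeps the IP header 32-bit aligned in the
               * receive buffer; the 5701 in PCI-X mode presumably cannot DMA
               * to such an unaligned start address, hence the 0 above.
               */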
10500
10501         /* By default, disable wake-on-lan.  User can change this
10502          * using ETHTOOL_SWOL.
10503          */
10504         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10505
10506         return err;
10507 }
10508
10509 #ifdef CONFIG_SPARC64
10510 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10511 {
10512         struct net_device *dev = tp->dev;
10513         struct pci_dev *pdev = tp->pdev;
10514         struct pcidev_cookie *pcp = pdev->sysdata;
10515
10516         if (pcp != NULL) {
10517                 int node = pcp->prom_node;
10518
10519                 if (prom_getproplen(node, "local-mac-address") == 6) {
10520                         prom_getproperty(node, "local-mac-address",
10521                                          dev->dev_addr, 6);
10522                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10523                         return 0;
10524                 }
10525         }
10526         return -ENODEV;
10527 }
10528
10529 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10530 {
10531         struct net_device *dev = tp->dev;
10532
10533         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10534         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10535         return 0;
10536 }
10537 #endif
10538
10539 static int __devinit tg3_get_device_address(struct tg3 *tp)
10540 {
10541         struct net_device *dev = tp->dev;
10542         u32 hi, lo, mac_offset;
10543         int addr_ok = 0;
10544
10545 #ifdef CONFIG_SPARC64
10546         if (!tg3_get_macaddr_sparc(tp))
10547                 return 0;
10548 #endif
10549
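              /* The MAC address normally sits at NVRAM offset 0x7c.  On
               * dual-MAC 5704 and 5780-class parts, DUAL_MAC_CTRL_ID
               * (presumably identifying the second port) selects the copy at
               * offset 0xcc instead.
               */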
10550         mac_offset = 0x7c;
10551         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10552              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10553             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10554                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10555                         mac_offset = 0xcc;
10556                 if (tg3_nvram_lock(tp))
10557                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10558                 else
10559                         tg3_nvram_unlock(tp);
10560         }
10561
10562         /* First try to get it from MAC address mailbox. */
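              /* (0x484b below is ASCII 'H','K', evidently a bootcode
               * signature indicating that the mailbox holds a valid address.)
               */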
10563         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10564         if ((hi >> 16) == 0x484b) {
10565                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10566                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10567
10568                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10569                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10570                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10571                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10572                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10573
10574                 /* Some old bootcode may report a 0 MAC address in SRAM */
10575                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10576         }
10577         if (!addr_ok) {
10578                 /* Next, try NVRAM. */
10579                 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10580                     !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10581                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10582                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10583                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10584                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10585                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10586                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10587                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10588                 }
10589                 /* Finally just fetch it out of the MAC control regs. */
10590                 else {
10591                         hi = tr32(MAC_ADDR_0_HIGH);
10592                         lo = tr32(MAC_ADDR_0_LOW);
10593
10594                         dev->dev_addr[5] = lo & 0xff;
10595                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10596                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10597                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10598                         dev->dev_addr[1] = hi & 0xff;
10599                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10600                 }
10601         }
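              /* Illustration (hypothetical values): if the NVRAM words at
               * mac_offset happened to read hi = 0xccbb0000 and lo = 0x2211ffee,
               * the NVRAM branch above yields dev_addr[] = bb:cc:ee:ff:11:22.
               * Each source (mailbox, NVRAM, MAC registers) stores the bytes
               * in a different order, which is why every branch unpacks hi/lo
               * differently.
               */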
10602
10603         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10604 #ifdef CONFIG_SPARC64
10605                 if (!tg3_get_default_macaddr_sparc(tp))
10606                         return 0;
10607 #endif
10608                 return -EINVAL;
10609         }
10610         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10611         return 0;
10612 }
10613
10614 #define BOUNDARY_SINGLE_CACHELINE       1
10615 #define BOUNDARY_MULTI_CACHELINE        2
10616
10617 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10618 {
10619         int cacheline_size;
10620         u8 byte;
10621         int goal;
10622
10623         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10624         if (byte == 0)
10625                 cacheline_size = 1024;
10626         else
10627                 cacheline_size = (int) byte * 4;
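              /* PCI_CACHE_LINE_SIZE is reported in 32-bit words, hence the
               * multiply by 4; a value of 0 (unset) is taken to be 1024 bytes
               * above, presumably as the worst case.
               */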
10628
10629         /* On 5703 and later chips, the boundary bits have no
10630          * effect.
10631          */
10632         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10633             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10634             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10635                 goto out;
10636
10637 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10638         goal = BOUNDARY_MULTI_CACHELINE;
10639 #else
10640 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10641         goal = BOUNDARY_SINGLE_CACHELINE;
10642 #else
10643         goal = 0;
10644 #endif
10645 #endif
10646
10647         if (!goal)
10648                 goto out;
10649
10650         /* PCI controllers on most RISC systems tend to disconnect
10651          * when a device tries to burst across a cache-line boundary.
10652          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10653          *
10654          * Unfortunately, for PCI-E there are only limited
10655          * write-side controls for this, and thus for reads
10656          * we will still get the disconnects.  We'll also waste
10657          * these PCI cycles for both read and write for chips
10658          * other than 5700 and 5701 which do not implement the
10659          * boundary bits.
10660          */
10661         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10662             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10663                 switch (cacheline_size) {
10664                 case 16:
10665                 case 32:
10666                 case 64:
10667                 case 128:
10668                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10669                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10670                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10671                         } else {
10672                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10673                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10674                         }
10675                         break;
10676
10677                 case 256:
10678                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10679                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10680                         break;
10681
10682                 default:
10683                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10684                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10685                         break;
10686                 }
10687         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10688                 switch (cacheline_size) {
10689                 case 16:
10690                 case 32:
10691                 case 64:
10692                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10693                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10694                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10695                                 break;
10696                         }
10697                         /* fallthrough */
10698                 case 128:
10699                 default:
10700                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10701                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10702                         break;
10703                 }
10704         } else {
10705                 switch (cacheline_size) {
10706                 case 16:
10707                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10708                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10709                                         DMA_RWCTRL_WRITE_BNDRY_16);
10710                                 break;
10711                         }
10712                         /* fallthrough */
10713                 case 32:
10714                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10715                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10716                                         DMA_RWCTRL_WRITE_BNDRY_32);
10717                                 break;
10718                         }
10719                         /* fallthrough */
10720                 case 64:
10721                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10722                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10723                                         DMA_RWCTRL_WRITE_BNDRY_64);
10724                                 break;
10725                         }
10726                         /* fallthrough */
10727                 case 128:
10728                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10729                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10730                                         DMA_RWCTRL_WRITE_BNDRY_128);
10731                                 break;
10732                         }
10733                         /* fallthrough */
10734                 case 256:
10735                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10736                                 DMA_RWCTRL_WRITE_BNDRY_256);
10737                         break;
10738                 case 512:
10739                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10740                                 DMA_RWCTRL_WRITE_BNDRY_512);
10741                         break;
10742                 case 1024:
10743                 default:
10744                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10745                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10746                         break;
10747                 }
10748         }
10749
10750 out:
10751         return val;
10752 }
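      /* For example, on a PCI-X bus that reports a 64-byte cache line with
       * goal == BOUNDARY_SINGLE_CACHELINE, the routine above ORs in
       * DMA_RWCTRL_READ_BNDRY_128_PCIX | DMA_RWCTRL_WRITE_BNDRY_128_PCIX;
       * on plain PCI with a 32-byte line it would use the _32 read/write
       * boundary values instead.
       */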
10753
10754 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10755 {
10756         struct tg3_internal_buffer_desc test_desc;
10757         u32 sram_dma_descs;
10758         int i, ret;
10759
10760         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10761
10762         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10763         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10764         tw32(RDMAC_STATUS, 0);
10765         tw32(WDMAC_STATUS, 0);
10766
10767         tw32(BUFMGR_MODE, 0);
10768         tw32(FTQ_RESET, 0);
10769
10770         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10771         test_desc.addr_lo = buf_dma & 0xffffffff;
10772         test_desc.nic_mbuf = 0x00002100;
10773         test_desc.len = size;
10774
10775         /*
10776          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10777          * the *second* time the tg3 driver was getting loaded after an
10778          * initial scan.
10779          *
10780          * Broadcom tells me:
10781          *   ...the DMA engine is connected to the GRC block and a DMA
10782          *   reset may affect the GRC block in some unpredictable way...
10783          *   The behavior of resets to individual blocks has not been tested.
10784          *
10785          * Broadcom noted the GRC reset will also reset all sub-components.
10786          */
10787         if (to_device) {
10788                 test_desc.cqid_sqid = (13 << 8) | 2;
10789
10790                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10791                 udelay(40);
10792         } else {
10793                 test_desc.cqid_sqid = (16 << 8) | 7;
10794
10795                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10796                 udelay(40);
10797         }
10798         test_desc.flags = 0x00000005;
10799
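              /* The descriptor built above is copied word by word into NIC
               * SRAM through the PCI memory-window registers in config space,
               * then handed to the read or write DMA engine by enqueueing its
               * SRAM address on the matching FTQ below.
               */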
10800         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10801                 u32 val;
10802
10803                 val = *(((u32 *)&test_desc) + i);
10804                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10805                                        sram_dma_descs + (i * sizeof(u32)));
10806                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10807         }
10808         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10809
10810         if (to_device) {
10811                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10812         } else {
10813                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10814         }
10815
10816         ret = -ENODEV;
10817         for (i = 0; i < 40; i++) {
10818                 u32 val;
10819
10820                 if (to_device)
10821                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10822                 else
10823                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10824                 if ((val & 0xffff) == sram_dma_descs) {
10825                         ret = 0;
10826                         break;
10827                 }
10828
10829                 udelay(100);
10830         }
10831
10832         return ret;
10833 }
10834
10835 #define TEST_BUFFER_SIZE        0x2000
10836
10837 static int __devinit tg3_test_dma(struct tg3 *tp)
10838 {
10839         dma_addr_t buf_dma;
10840         u32 *buf, saved_dma_rwctrl;
10841         int ret;
10842
10843         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10844         if (!buf) {
10845                 ret = -ENOMEM;
10846                 goto out_nofree;
10847         }
10848
10849         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10850                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10851
10852         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10853
10854         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10855                 /* DMA read watermark not used on PCIE */
10856                 tp->dma_rwctrl |= 0x00180000;
10857         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10858                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10859                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10860                         tp->dma_rwctrl |= 0x003f0000;
10861                 else
10862                         tp->dma_rwctrl |= 0x003f000f;
10863         } else {
10864                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10865                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10866                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10867
10868                         /* If the 5704 is behind the EPB bridge, we can
10869                          * do the less restrictive ONE_DMA workaround for
10870                          * better performance.
10871                          */
10872                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10873                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10874                                 tp->dma_rwctrl |= 0x8000;
10875                         else if (ccval == 0x6 || ccval == 0x7)
10876                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10877
10878                         /* Set bit 23 to enable PCIX hw bug fix */
10879                         tp->dma_rwctrl |= 0x009f0000;
10880                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10881                         /* 5780 always in PCIX mode */
10882                         tp->dma_rwctrl |= 0x00144000;
10883                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10884                         /* 5714 always in PCIX mode */
10885                         tp->dma_rwctrl |= 0x00148000;
10886                 } else {
10887                         tp->dma_rwctrl |= 0x001b000f;
10888                 }
10889         }
10890
10891         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10893                 tp->dma_rwctrl &= 0xfffffff0;
10894
10895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10897                 /* Remove this if it causes problems for some boards. */
10898                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10899
10900                 /* On 5700/5701 chips, we need to set this bit.
10901                  * Otherwise the chip will issue cacheline transactions
10902                  * to streamable DMA memory with not all the byte
10903                  * enables turned on.  This is an error on several
10904                  * RISC PCI controllers, in particular sparc64.
10905                  *
10906                  * On 5703/5704 chips, this bit has been reassigned
10907                  * a different meaning.  In particular, it is used
10908                  * on those chips to enable a PCI-X workaround.
10909                  */
10910                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10911         }
10912
10913         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10914
10915 #if 0
10916         /* Unneeded, already done by tg3_get_invariants.  */
10917         tg3_switch_clocks(tp);
10918 #endif
10919
10920         ret = 0;
10921         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10922             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10923                 goto out;
10924
10925         /* It is best to perform DMA test with maximum write burst size
10926          * to expose the 5700/5701 write DMA bug.
10927          */
10928         saved_dma_rwctrl = tp->dma_rwctrl;
10929         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10930         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10931
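              /* Each pass fills the buffer with an incrementing pattern, DMAs
               * it to the chip and back, and verifies it.  On a mismatch the
               * write boundary is tightened to 16 bytes and the test retried;
               * only if that also fails is -ENODEV returned.
               */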
10932         while (1) {
10933                 u32 *p = buf, i;
10934
10935                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10936                         p[i] = i;
10937
10938                 /* Send the buffer to the chip. */
10939                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10940                 if (ret) {
10941                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
10942                         break;
10943                 }
10944
10945 #if 0
10946                 /* validate data reached card RAM correctly. */
10947                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10948                         u32 val;
10949                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10950                         if (le32_to_cpu(val) != p[i]) {
10951                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (0x%x != 0x%x)\n", le32_to_cpu(val), p[i]);
10952                                 /* ret = -ENODEV here? */
10953                         }
10954                         p[i] = 0;
10955                 }
10956 #endif
10957                 /* Now read it back. */
10958                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10959                 if (ret) {
10960                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
10961
10962                         break;
10963                 }
10964
10965                 /* Verify it. */
10966                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10967                         if (p[i] == i)
10968                                 continue;
10969
10970                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10971                             DMA_RWCTRL_WRITE_BNDRY_16) {
10972                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10973                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10974                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10975                                 break;
10976                         } else {
10977                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10978                                 ret = -ENODEV;
10979                                 goto out;
10980                         }
10981                 }
10982
10983                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10984                         /* Success. */
10985                         ret = 0;
10986                         break;
10987                 }
10988         }
10989         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10990             DMA_RWCTRL_WRITE_BNDRY_16) {
10991                 static struct pci_device_id dma_wait_state_chipsets[] = {
10992                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10993                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10994                         { },
10995                 };
10996
10997                 /* DMA test passed without adjusting DMA boundary,
10998                  * now look for chipsets that are known to expose the
10999                  * DMA bug without failing the test.
11000                  */
11001                 if (pci_dev_present(dma_wait_state_chipsets)) {
11002                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11003                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11004                 }
11005                 else
11006                         /* Safe to use the calculated DMA boundary. */
11007                         tp->dma_rwctrl = saved_dma_rwctrl;
11008
11009                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11010         }
11011
11012 out:
11013         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11014 out_nofree:
11015         return ret;
11016 }
11017
11018 static void __devinit tg3_init_link_config(struct tg3 *tp)
11019 {
11020         tp->link_config.advertising =
11021                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11022                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11023                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11024                  ADVERTISED_Autoneg | ADVERTISED_MII);
11025         tp->link_config.speed = SPEED_INVALID;
11026         tp->link_config.duplex = DUPLEX_INVALID;
11027         tp->link_config.autoneg = AUTONEG_ENABLE;
11028         tp->link_config.active_speed = SPEED_INVALID;
11029         tp->link_config.active_duplex = DUPLEX_INVALID;
11030         tp->link_config.phy_is_low_power = 0;
11031         tp->link_config.orig_speed = SPEED_INVALID;
11032         tp->link_config.orig_duplex = DUPLEX_INVALID;
11033         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11034 }
11035
11036 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11037 {
11038         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11039                 tp->bufmgr_config.mbuf_read_dma_low_water =
11040                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11041                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11042                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11043                 tp->bufmgr_config.mbuf_high_water =
11044                         DEFAULT_MB_HIGH_WATER_5705;
11045
11046                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11047                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11048                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11049                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11050                 tp->bufmgr_config.mbuf_high_water_jumbo =
11051                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11052         } else {
11053                 tp->bufmgr_config.mbuf_read_dma_low_water =
11054                         DEFAULT_MB_RDMA_LOW_WATER;
11055                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11056                         DEFAULT_MB_MACRX_LOW_WATER;
11057                 tp->bufmgr_config.mbuf_high_water =
11058                         DEFAULT_MB_HIGH_WATER;
11059
11060                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11061                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11062                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11063                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11064                 tp->bufmgr_config.mbuf_high_water_jumbo =
11065                         DEFAULT_MB_HIGH_WATER_JUMBO;
11066         }
11067
11068         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11069         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11070 }
11071
11072 static char * __devinit tg3_phy_string(struct tg3 *tp)
11073 {
11074         switch (tp->phy_id & PHY_ID_MASK) {
11075         case PHY_ID_BCM5400:    return "5400";
11076         case PHY_ID_BCM5401:    return "5401";
11077         case PHY_ID_BCM5411:    return "5411";
11078         case PHY_ID_BCM5701:    return "5701";
11079         case PHY_ID_BCM5703:    return "5703";
11080         case PHY_ID_BCM5704:    return "5704";
11081         case PHY_ID_BCM5705:    return "5705";
11082         case PHY_ID_BCM5750:    return "5750";
11083         case PHY_ID_BCM5752:    return "5752";
11084         case PHY_ID_BCM5714:    return "5714";
11085         case PHY_ID_BCM5780:    return "5780";
11086         case PHY_ID_BCM5755:    return "5755";
11087         case PHY_ID_BCM5787:    return "5787";
11088         case PHY_ID_BCM8002:    return "8002/serdes";
11089         case 0:                 return "serdes";
11090         default:                return "unknown";
11091         }
11092 }
11093
11094 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11095 {
11096         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11097                 strcpy(str, "PCI Express");
11098                 return str;
11099         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11100                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11101
11102                 strcpy(str, "PCIX:");
11103
11104                 if ((clock_ctrl == 7) ||
11105                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11106                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11107                         strcat(str, "133MHz");
11108                 else if (clock_ctrl == 0)
11109                         strcat(str, "33MHz");
11110                 else if (clock_ctrl == 2)
11111                         strcat(str, "50MHz");
11112                 else if (clock_ctrl == 4)
11113                         strcat(str, "66MHz");
11114                 else if (clock_ctrl == 6)
11115                         strcat(str, "100MHz");
11116         } else {
11117                 strcpy(str, "PCI:");
11118                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11119                         strcat(str, "66MHz");
11120                 else
11121                         strcat(str, "33MHz");
11122         }
11123         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11124                 strcat(str, ":32-bit");
11125         else
11126                 strcat(str, ":64-bit");
11127         return str;
11128 }
11129
11130 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11131 {
11132         struct pci_dev *peer;
11133         unsigned int func, devnr = tp->pdev->devfn & ~7;
11134
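              /* devfn & ~7 clears the function number, so this walks all
               * eight functions of the slot looking for the other port of a
               * dual-port device (the 5704/5714 callers in tg3_init_one()).
               */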
11135         for (func = 0; func < 8; func++) {
11136                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11137                 if (peer && peer != tp->pdev)
11138                         break;
11139                 pci_dev_put(peer);
11140         }
11141         /* 5704 can be configured in single-port mode, set peer to
11142          * tp->pdev in that case.
11143          */
11144         if (!peer) {
11145                 peer = tp->pdev;
11146                 return peer;
11147         }
11148
11149         /*
11150          * We don't need to keep the refcount elevated; there's no way
11151          * to remove one half of this device without removing the other
11152          */
11153         pci_dev_put(peer);
11154
11155         return peer;
11156 }
11157
11158 static void __devinit tg3_init_coal(struct tg3 *tp)
11159 {
11160         struct ethtool_coalesce *ec = &tp->coal;
11161
11162         memset(ec, 0, sizeof(*ec));
11163         ec->cmd = ETHTOOL_GCOALESCE;
11164         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11165         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11166         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11167         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11168         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11169         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11170         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11171         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11172         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11173
11174         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11175                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11176                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11177                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11178                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11179                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11180         }
11181
11182         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11183                 ec->rx_coalesce_usecs_irq = 0;
11184                 ec->tx_coalesce_usecs_irq = 0;
11185                 ec->stats_block_coalesce_usecs = 0;
11186         }
11187 }
11188
11189 static int __devinit tg3_init_one(struct pci_dev *pdev,
11190                                   const struct pci_device_id *ent)
11191 {
11192         static int tg3_version_printed = 0;
11193         unsigned long tg3reg_base, tg3reg_len;
11194         struct net_device *dev;
11195         struct tg3 *tp;
11196         int i, err, pm_cap;
11197         char str[40];
11198         u64 dma_mask, persist_dma_mask;
11199
11200         if (tg3_version_printed++ == 0)
11201                 printk(KERN_INFO "%s", version);
11202
11203         err = pci_enable_device(pdev);
11204         if (err) {
11205                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11206                        "aborting.\n");
11207                 return err;
11208         }
11209
11210         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11211                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11212                        "base address, aborting.\n");
11213                 err = -ENODEV;
11214                 goto err_out_disable_pdev;
11215         }
11216
11217         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11218         if (err) {
11219                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11220                        "aborting.\n");
11221                 goto err_out_disable_pdev;
11222         }
11223
11224         pci_set_master(pdev);
11225
11226         /* Find power-management capability. */
11227         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11228         if (pm_cap == 0) {
11229                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11230                        "aborting.\n");
11231                 err = -EIO;
11232                 goto err_out_free_res;
11233         }
11234
11235         tg3reg_base = pci_resource_start(pdev, 0);
11236         tg3reg_len = pci_resource_len(pdev, 0);
11237
11238         dev = alloc_etherdev(sizeof(*tp));
11239         if (!dev) {
11240                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11241                 err = -ENOMEM;
11242                 goto err_out_free_res;
11243         }
11244
11245         SET_MODULE_OWNER(dev);
11246         SET_NETDEV_DEV(dev, &pdev->dev);
11247
11248         dev->features |= NETIF_F_LLTX;
11249 #if TG3_VLAN_TAG_USED
11250         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11251         dev->vlan_rx_register = tg3_vlan_rx_register;
11252         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11253 #endif
11254
11255         tp = netdev_priv(dev);
11256         tp->pdev = pdev;
11257         tp->dev = dev;
11258         tp->pm_cap = pm_cap;
11259         tp->mac_mode = TG3_DEF_MAC_MODE;
11260         tp->rx_mode = TG3_DEF_RX_MODE;
11261         tp->tx_mode = TG3_DEF_TX_MODE;
11262         tp->mi_mode = MAC_MI_MODE_BASE;
11263         if (tg3_debug > 0)
11264                 tp->msg_enable = tg3_debug;
11265         else
11266                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11267
11268         /* The word/byte swap controls here control register access byte
11269          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11270          * setting below.
11271          */
11272         tp->misc_host_ctrl =
11273                 MISC_HOST_CTRL_MASK_PCI_INT |
11274                 MISC_HOST_CTRL_WORD_SWAP |
11275                 MISC_HOST_CTRL_INDIR_ACCESS |
11276                 MISC_HOST_CTRL_PCISTATE_RW;
11277
11278         /* The NONFRM (non-frame) byte/word swap controls take effect
11279          * on descriptor entries, anything which isn't packet data.
11280          *
11281          * The StrongARM chips on the board (one for tx, one for rx)
11282          * are running in big-endian mode.
11283          */
11284         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11285                         GRC_MODE_WSWAP_NONFRM_DATA);
11286 #ifdef __BIG_ENDIAN
11287         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11288 #endif
11289         spin_lock_init(&tp->lock);
11290         spin_lock_init(&tp->tx_lock);
11291         spin_lock_init(&tp->indirect_lock);
11292         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11293
11294         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11295         if (tp->regs == 0UL) {
11296                 printk(KERN_ERR PFX "Cannot map device registers, "
11297                        "aborting.\n");
11298                 err = -ENOMEM;
11299                 goto err_out_free_dev;
11300         }
11301
11302         tg3_init_link_config(tp);
11303
11304         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11305         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11306         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11307
11308         dev->open = tg3_open;
11309         dev->stop = tg3_close;
11310         dev->get_stats = tg3_get_stats;
11311         dev->set_multicast_list = tg3_set_rx_mode;
11312         dev->set_mac_address = tg3_set_mac_addr;
11313         dev->do_ioctl = tg3_ioctl;
11314         dev->tx_timeout = tg3_tx_timeout;
11315         dev->poll = tg3_poll;
11316         dev->ethtool_ops = &tg3_ethtool_ops;
11317         dev->weight = 64;
11318         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11319         dev->change_mtu = tg3_change_mtu;
11320         dev->irq = pdev->irq;
11321 #ifdef CONFIG_NET_POLL_CONTROLLER
11322         dev->poll_controller = tg3_poll_controller;
11323 #endif
11324
11325         err = tg3_get_invariants(tp);
11326         if (err) {
11327                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11328                        "aborting.\n");
11329                 goto err_out_iounmap;
11330         }
11331
11332         /* The EPB bridge inside 5714, 5715, and 5780 and any
11333          * device behind the EPB cannot support DMA addresses > 40-bit.
11334          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11335          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11336          * do DMA address check in tg3_start_xmit().
11337          */
11338         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11339                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11340         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11341                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11342 #ifdef CONFIG_HIGHMEM
11343                 dma_mask = DMA_64BIT_MASK;
11344 #endif
11345         } else
11346                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11347
11348         /* Configure DMA attributes. */
11349         if (dma_mask > DMA_32BIT_MASK) {
11350                 err = pci_set_dma_mask(pdev, dma_mask);
11351                 if (!err) {
11352                         dev->features |= NETIF_F_HIGHDMA;
11353                         err = pci_set_consistent_dma_mask(pdev,
11354                                                           persist_dma_mask);
11355                         if (err < 0) {
11356                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11357                                        "DMA for consistent allocations\n");
11358                                 goto err_out_iounmap;
11359                         }
11360                 }
11361         }
11362         if (err || dma_mask == DMA_32BIT_MASK) {
11363                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11364                 if (err) {
11365                         printk(KERN_ERR PFX "No usable DMA configuration, "
11366                                "aborting.\n");
11367                         goto err_out_iounmap;
11368                 }
11369         }
11370
11371         tg3_init_bufmgr_config(tp);
11372
11373 #if TG3_TSO_SUPPORT != 0
11374         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11375                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11376         }
11377         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11379             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11380             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11381                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11382         } else {
11383                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11384         }
11385
11386         /* TSO is on by default on chips that support hardware TSO.
11387          * Firmware TSO on older chips gives lower performance, so it
11388          * is off by default, but can be enabled using ethtool.
11389          */
11390         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11391                 dev->features |= NETIF_F_TSO;
11392
11393 #endif
11394
11395         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11396             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11397             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11398                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11399                 tp->rx_pending = 63;
11400         }
11401
11402         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11403             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11404                 tp->pdev_peer = tg3_find_peer(tp);
11405
11406         err = tg3_get_device_address(tp);
11407         if (err) {
11408                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11409                        "aborting.\n");
11410                 goto err_out_iounmap;
11411         }
11412
11413         /*
11414          * Reset chip in case UNDI or EFI driver did not shut it down
11415          * cleanly.  Otherwise the DMA self test will enable WDMAC and
11416          * we'll see (spurious) pending DMA on the PCI bus at that point.
11417          */
11418         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11419             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11420                 pci_save_state(tp->pdev);
11421                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11422                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11423         }
11424
11425         err = tg3_test_dma(tp);
11426         if (err) {
11427                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11428                 goto err_out_iounmap;
11429         }
11430
11431         /* Tigon3 can do ipv4 only... and some chips have buggy
11432          * checksumming.
11433          */
11434         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11436                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11437                         dev->features |= NETIF_F_HW_CSUM;
11438                 else
11439                         dev->features |= NETIF_F_IP_CSUM;
11440                 dev->features |= NETIF_F_SG;
11441                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11442         } else
11443                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11444
11445         /* flow control autonegotiation is default behavior */
11446         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11447
11448         tg3_init_coal(tp);
11449
11450         /* Now that we have fully setup the chip, save away a snapshot
11451          * of the PCI config space.  We need to restore this after
11452          * GRC_MISC_CFG core clock resets and some resume events.
11453          */
11454         pci_save_state(tp->pdev);
11455
11456         err = register_netdev(dev);
11457         if (err) {
11458                 printk(KERN_ERR PFX "Cannot register net device, "
11459                        "aborting.\n");
11460                 goto err_out_iounmap;
11461         }
11462
11463         pci_set_drvdata(pdev, dev);
11464
11465         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11466                dev->name,
11467                tp->board_part_number,
11468                tp->pci_chip_rev_id,
11469                tg3_phy_string(tp),
11470                tg3_bus_string(tp, str),
11471                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11472
11473         for (i = 0; i < 6; i++)
11474                 printk("%2.2x%c", dev->dev_addr[i],
11475                        i == 5 ? '\n' : ':');
11476
11477         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11478                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11479                "TSOcap[%d] \n",
11480                dev->name,
11481                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11482                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11483                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11484                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11485                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11486                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11487                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11488         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11489                dev->name, tp->dma_rwctrl,
11490                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11491                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11492
11493         netif_carrier_off(tp->dev);
11494
11495         return 0;
11496
11497 err_out_iounmap:
11498         if (tp->regs) {
11499                 iounmap(tp->regs);
11500                 tp->regs = NULL;
11501         }
11502
11503 err_out_free_dev:
11504         free_netdev(dev);
11505
11506 err_out_free_res:
11507         pci_release_regions(pdev);
11508
11509 err_out_disable_pdev:
11510         pci_disable_device(pdev);
11511         pci_set_drvdata(pdev, NULL);
11512         return err;
11513 }
11514
11515 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11516 {
11517         struct net_device *dev = pci_get_drvdata(pdev);
11518
11519         if (dev) {
11520                 struct tg3 *tp = netdev_priv(dev);
11521
11522                 flush_scheduled_work();
11523                 unregister_netdev(dev);
11524                 if (tp->regs) {
11525                         iounmap(tp->regs);
11526                         tp->regs = NULL;
11527                 }
11528                 free_netdev(dev);
11529                 pci_release_regions(pdev);
11530                 pci_disable_device(pdev);
11531                 pci_set_drvdata(pdev, NULL);
11532         }
11533 }
11534
11535 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11536 {
11537         struct net_device *dev = pci_get_drvdata(pdev);
11538         struct tg3 *tp = netdev_priv(dev);
11539         int err;
11540
11541         if (!netif_running(dev))
11542                 return 0;
11543
11544         flush_scheduled_work();
11545         tg3_netif_stop(tp);
11546
11547         del_timer_sync(&tp->timer);
11548
11549         tg3_full_lock(tp, 1);
11550         tg3_disable_ints(tp);
11551         tg3_full_unlock(tp);
11552
11553         netif_device_detach(dev);
11554
11555         tg3_full_lock(tp, 0);
11556         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11557         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11558         tg3_full_unlock(tp);
11559
11560         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
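              /* If the transition to the low-power state fails, back out:
               * reprogram the hardware, restart the timer and reattach the
               * interface so the device keeps running.
               */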
11561         if (err) {
11562                 tg3_full_lock(tp, 0);
11563
11564                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11565                 tg3_init_hw(tp);
11566
11567                 tp->timer.expires = jiffies + tp->timer_offset;
11568                 add_timer(&tp->timer);
11569
11570                 netif_device_attach(dev);
11571                 tg3_netif_start(tp);
11572
11573                 tg3_full_unlock(tp);
11574         }
11575
11576         return err;
11577 }
11578
11579 static int tg3_resume(struct pci_dev *pdev)
11580 {
11581         struct net_device *dev = pci_get_drvdata(pdev);
11582         struct tg3 *tp = netdev_priv(dev);
11583         int err;
11584
11585         if (!netif_running(dev))
11586                 return 0;
11587
11588         pci_restore_state(tp->pdev);
11589
11590         err = tg3_set_power_state(tp, PCI_D0);
11591         if (err)
11592                 return err;
11593
11594         netif_device_attach(dev);
11595
11596         tg3_full_lock(tp, 0);
11597
11598         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11599         tg3_init_hw(tp);
11600
11601         tp->timer.expires = jiffies + tp->timer_offset;
11602         add_timer(&tp->timer);
11603
11604         tg3_netif_start(tp);
11605
11606         tg3_full_unlock(tp);
11607
11608         return 0;
11609 }
11610
11611 static struct pci_driver tg3_driver = {
11612         .name           = DRV_MODULE_NAME,
11613         .id_table       = tg3_pci_tbl,
11614         .probe          = tg3_init_one,
11615         .remove         = __devexit_p(tg3_remove_one),
11616         .suspend        = tg3_suspend,
11617         .resume         = tg3_resume
11618 };
11619
11620 static int __init tg3_init(void)
11621 {
11622         return pci_module_init(&tg3_driver);
11623 }
11624
11625 static void __exit tg3_cleanup(void)
11626 {
11627         pci_unregister_driver(&tg3_driver);
11628 }
11629
11630 module_init(tg3_init);
11631 module_exit(tg3_cleanup);