[TG3]: Add basic register access function pointers
[safe/jmp/linux-2.6] drivers/net/tg3.c
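For context: this patch routes register and mailbox I/O through per-device function pointers (tp->write32, tp->read32, tp->write32_mbox, tp->write32_rx_mbox and tp->write32_tx_mbox, dispatched by the tw32()/tr32() macros further down), so that chip-specific access quirks are chosen once rather than tested on every access. A minimal sketch of how the pointers might be hooked up at probe time, placed after the accessor definitions below; the helper name and the exact selection logic are illustrative, not taken from the driver:

static void tg3_sketch_init_reg_ops(struct tg3 *tp)
{
        /* Illustrative only.  Chips with the PCI-X target hardware bug go
         * through the indirect config-space window; others write MMIO
         * directly.
         */
        if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
                tp->write32 = tg3_write_indirect_reg32;
        else
                tp->write32 = tg3_write32;

        tp->read32 = tg3_read32;

        /* Mailbox registers get their own hooks because they carry
         * different ordering/double-write workarounds than plain registers.
         */
        tp->write32_mbox = tg3_write32;
        tp->write32_rx_mbox = tg3_write32_rx_mbox;
        tp->write32_tx_mbox = tg3_write32_tx_mbox;
}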
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.37"
70 #define DRV_MODULE_RELDATE      "August 25, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself;
105  * we really want to expose these constants to GCC so that modulo et
106  * al. operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
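/* NEXT_TX() uses the '& (foo - 1)' form of the modulo mentioned in the
 * comment above TG3_RX_RCB_RING_SIZE; it is valid only because
 * TG3_TX_RING_SIZE is a power of two.  TX_BUFFS_AVAIL() likewise derives
 * the free-descriptor count from the producer/consumer indices without a
 * divide.
 */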
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { 0, }
245 };
246
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249 static struct {
250         const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
252         { "rx_octets" },
253         { "rx_fragments" },
254         { "rx_ucast_packets" },
255         { "rx_mcast_packets" },
256         { "rx_bcast_packets" },
257         { "rx_fcs_errors" },
258         { "rx_align_errors" },
259         { "rx_xon_pause_rcvd" },
260         { "rx_xoff_pause_rcvd" },
261         { "rx_mac_ctrl_rcvd" },
262         { "rx_xoff_entered" },
263         { "rx_frame_too_long_errors" },
264         { "rx_jabbers" },
265         { "rx_undersize_packets" },
266         { "rx_in_length_errors" },
267         { "rx_out_length_errors" },
268         { "rx_64_or_less_octet_packets" },
269         { "rx_65_to_127_octet_packets" },
270         { "rx_128_to_255_octet_packets" },
271         { "rx_256_to_511_octet_packets" },
272         { "rx_512_to_1023_octet_packets" },
273         { "rx_1024_to_1522_octet_packets" },
274         { "rx_1523_to_2047_octet_packets" },
275         { "rx_2048_to_4095_octet_packets" },
276         { "rx_4096_to_8191_octet_packets" },
277         { "rx_8192_to_9022_octet_packets" },
278
279         { "tx_octets" },
280         { "tx_collisions" },
281
282         { "tx_xon_sent" },
283         { "tx_xoff_sent" },
284         { "tx_flow_control" },
285         { "tx_mac_errors" },
286         { "tx_single_collisions" },
287         { "tx_mult_collisions" },
288         { "tx_deferred" },
289         { "tx_excessive_collisions" },
290         { "tx_late_collisions" },
291         { "tx_collide_2times" },
292         { "tx_collide_3times" },
293         { "tx_collide_4times" },
294         { "tx_collide_5times" },
295         { "tx_collide_6times" },
296         { "tx_collide_7times" },
297         { "tx_collide_8times" },
298         { "tx_collide_9times" },
299         { "tx_collide_10times" },
300         { "tx_collide_11times" },
301         { "tx_collide_12times" },
302         { "tx_collide_13times" },
303         { "tx_collide_14times" },
304         { "tx_collide_15times" },
305         { "tx_ucast_packets" },
306         { "tx_mcast_packets" },
307         { "tx_bcast_packets" },
308         { "tx_carrier_sense_errors" },
309         { "tx_discards" },
310         { "tx_errors" },
311
312         { "dma_writeq_full" },
313         { "dma_write_prioq_full" },
314         { "rxbds_empty" },
315         { "rx_discards" },
316         { "rx_errors" },
317         { "rx_threshold_hit" },
318
319         { "dma_readq_full" },
320         { "dma_read_prioq_full" },
321         { "tx_comp_queue_full" },
322
323         { "ring_set_send_prod_index" },
324         { "ring_status_update" },
325         { "nic_irqs" },
326         { "nic_avoided_irqs" },
327         { "nic_tx_threshold_hit" }
328 };
329
330 static struct {
331         const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333         { "nvram test     (online) " },
334         { "link test      (online) " },
335         { "register test  (offline)" },
336         { "memory test    (offline)" },
337         { "loopback test  (offline)" },
338         { "interrupt test (offline)" },
339 };
340
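/* Basic 32-bit register write.  On chips with the PCI-X target hardware
 * bug the value goes through the indirect register window in PCI config
 * space (serialized by indirect_lock); otherwise it is a direct MMIO
 * write, read back on chips with the 5701 register-write bug.
 */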
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
344                 spin_lock_bh(&tp->indirect_lock);
345                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347                 spin_unlock_bh(&tp->indirect_lock);
348         } else {
349                 writel(val, tp->regs + off);
350                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
351                         readl(tp->regs + off);
352         }
353 }
354
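/* Like the write above, but in the MMIO case the register is always read
 * back so the posted PCI write is flushed before returning.
 */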
355 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356 {
357         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
358                 spin_lock_bh(&tp->indirect_lock);
359                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361                 spin_unlock_bh(&tp->indirect_lock);
362         } else {
363                 void __iomem *dest = tp->regs + off;
364                 writel(val, dest);
365                 readl(dest);    /* always flush PCI write */
366         }
367 }
368
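/* Post a value to an RX mailbox, reading it back when the
 * TG3_FLAG_MBOX_WRITE_REORDER workaround requires the write to be flushed.
 */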
369 static void tg3_write32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
370 {
371         void __iomem *mbox = tp->regs + off;
372         writel(val, mbox);
373         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
374                 readl(mbox);
375 }
376
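/* Post a value to a TX mailbox.  Chips with the TXD mailbox hardware bug
 * need the value written twice; the TG3_FLAG_MBOX_WRITE_REORDER flush is
 * the same as for the RX mailboxes.
 */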
377 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378 {
379         void __iomem *mbox = tp->regs + off;
380         writel(val, mbox);
381         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
382                 writel(val, mbox);
383         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
384                 readl(mbox);
385 }
386
387 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
388 {
389         writel(val, tp->regs + off);
390 }
391
392 static u32 tg3_read32(struct tg3 *tp, u32 off)
393 {
394         return readl(tp->regs + off);
395 }
396
397 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
398 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
399 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
400
401 #define tw32(reg,val)           tp->write32(tp, reg, val)
402 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
403 #define tr32(reg)               tp->read32(tp, reg)
404
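/* Access NIC on-board SRAM through the memory window in PCI config space.
 * The window base is restored to zero afterwards and the whole sequence
 * is serialized by indirect_lock.
 */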
405 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
406 {
407         spin_lock_bh(&tp->indirect_lock);
408         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
409         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
410
411         /* Always leave this as zero. */
412         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
413         spin_unlock_bh(&tp->indirect_lock);
414 }
415
416 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
417 {
418         spin_lock_bh(&tp->indirect_lock);
419         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
420         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
421
422         /* Always leave this as zero. */
423         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
424         spin_unlock_bh(&tp->indirect_lock);
425 }
426
427 static void tg3_disable_ints(struct tg3 *tp)
428 {
429         tw32(TG3PCI_MISC_HOST_CTRL,
430              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
431         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
432         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
433 }
434
435 static inline void tg3_cond_int(struct tg3 *tp)
436 {
437         if (tp->hw_status->status & SD_STATUS_UPDATED)
438                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
439 }
440
441 static void tg3_enable_ints(struct tg3 *tp)
442 {
443         tp->irq_sync = 0;
444         wmb();
445
446         tw32(TG3PCI_MISC_HOST_CTRL,
447              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
448         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
449                      (tp->last_tag << 24));
450         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
451         tg3_cond_int(tp);
452 }
453
454 static inline unsigned int tg3_has_work(struct tg3 *tp)
455 {
456         struct tg3_hw_status *sblk = tp->hw_status;
457         unsigned int work_exists = 0;
458
459         /* check for phy events */
460         if (!(tp->tg3_flags &
461               (TG3_FLAG_USE_LINKCHG_REG |
462                TG3_FLAG_POLL_SERDES))) {
463                 if (sblk->status & SD_STATUS_LINK_CHG)
464                         work_exists = 1;
465         }
466         /* check for RX/TX work to do */
467         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
468             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
469                 work_exists = 1;
470
471         return work_exists;
472 }
473
474 /* tg3_restart_ints
475  *  similar to tg3_enable_ints, but it accurately determines whether there
476  *  is new work pending and can return without flushing the PIO write
477  *  which reenables interrupts 
478  */
479 static void tg3_restart_ints(struct tg3 *tp)
480 {
481         tw32(TG3PCI_MISC_HOST_CTRL,
482                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
483         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
484                      tp->last_tag << 24);
485         mmiowb();
486
487         /* When doing tagged status, this work check is unnecessary.
488          * The last_tag we write above tells the chip which piece of
489          * work we've completed.
490          */
491         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
492             tg3_has_work(tp))
493                 tw32(HOSTCC_MODE, tp->coalesce_mode |
494                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
495 }
496
497 static inline void tg3_netif_stop(struct tg3 *tp)
498 {
499         tp->dev->trans_start = jiffies; /* prevent tx timeout */
500         netif_poll_disable(tp->dev);
501         netif_tx_disable(tp->dev);
502 }
503
504 static inline void tg3_netif_start(struct tg3 *tp)
505 {
506         netif_wake_queue(tp->dev);
507         /* NOTE: unconditional netif_wake_queue is only appropriate
508          * so long as all callers are assured to have free tx slots
509          * (such as after tg3_init_hw)
510          */
511         netif_poll_enable(tp->dev);
512         tp->hw_status->status |= SD_STATUS_UPDATED;
513         tg3_enable_ints(tp);
514 }
515
516 static void tg3_switch_clocks(struct tg3 *tp)
517 {
518         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
519         u32 orig_clock_ctrl;
520
521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
522                 return;
523
524         orig_clock_ctrl = clock_ctrl;
525         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
526                        CLOCK_CTRL_CLKRUN_OENABLE |
527                        0x1f);
528         tp->pci_clock_ctrl = clock_ctrl;
529
530         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
531                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
532                         tw32_f(TG3PCI_CLOCK_CTRL,
533                                clock_ctrl | CLOCK_CTRL_625_CORE);
534                         udelay(40);
535                 }
536         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
537                 tw32_f(TG3PCI_CLOCK_CTRL,
538                      clock_ctrl |
539                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
540                 udelay(40);
541                 tw32_f(TG3PCI_CLOCK_CTRL,
542                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
543                 udelay(40);
544         }
545         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
546         udelay(40);
547 }
548
549 #define PHY_BUSY_LOOPS  5000
550
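/* tg3_readphy()/tg3_writephy(): MII PHY register access.  A command frame
 * is built in MAC_MI_COM and MI_COM_BUSY is polled until it clears, up to
 * PHY_BUSY_LOOPS iterations.  Hardware auto-polling is paused for the
 * duration and restored afterwards.  Returns 0 on success, -EBUSY on
 * timeout.
 */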
551 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
552 {
553         u32 frame_val;
554         unsigned int loops;
555         int ret;
556
557         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
558                 tw32_f(MAC_MI_MODE,
559                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
560                 udelay(80);
561         }
562
563         *val = 0x0;
564
565         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
566                       MI_COM_PHY_ADDR_MASK);
567         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
568                       MI_COM_REG_ADDR_MASK);
569         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
570         
571         tw32_f(MAC_MI_COM, frame_val);
572
573         loops = PHY_BUSY_LOOPS;
574         while (loops != 0) {
575                 udelay(10);
576                 frame_val = tr32(MAC_MI_COM);
577
578                 if ((frame_val & MI_COM_BUSY) == 0) {
579                         udelay(5);
580                         frame_val = tr32(MAC_MI_COM);
581                         break;
582                 }
583                 loops -= 1;
584         }
585
586         ret = -EBUSY;
587         if (loops != 0) {
588                 *val = frame_val & MI_COM_DATA_MASK;
589                 ret = 0;
590         }
591
592         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
593                 tw32_f(MAC_MI_MODE, tp->mi_mode);
594                 udelay(80);
595         }
596
597         return ret;
598 }
599
600 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
601 {
602         u32 frame_val;
603         unsigned int loops;
604         int ret;
605
606         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
607                 tw32_f(MAC_MI_MODE,
608                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
609                 udelay(80);
610         }
611
612         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
613                       MI_COM_PHY_ADDR_MASK);
614         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
615                       MI_COM_REG_ADDR_MASK);
616         frame_val |= (val & MI_COM_DATA_MASK);
617         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
618         
619         tw32_f(MAC_MI_COM, frame_val);
620
621         loops = PHY_BUSY_LOOPS;
622         while (loops != 0) {
623                 udelay(10);
624                 frame_val = tr32(MAC_MI_COM);
625                 if ((frame_val & MI_COM_BUSY) == 0) {
626                         udelay(5);
627                         frame_val = tr32(MAC_MI_COM);
628                         break;
629                 }
630                 loops -= 1;
631         }
632
633         ret = -EBUSY;
634         if (loops != 0)
635                 ret = 0;
636
637         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
638                 tw32_f(MAC_MI_MODE, tp->mi_mode);
639                 udelay(80);
640         }
641
642         return ret;
643 }
644
645 static void tg3_phy_set_wirespeed(struct tg3 *tp)
646 {
647         u32 val;
648
649         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
650                 return;
651
652         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
653             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
654                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
655                              (val | (1 << 15) | (1 << 4)));
656 }
657
658 static int tg3_bmcr_reset(struct tg3 *tp)
659 {
660         u32 phy_control;
661         int limit, err;
662
663         /* OK, reset it, and poll the BMCR_RESET bit until it
664          * clears or we time out.
665          */
666         phy_control = BMCR_RESET;
667         err = tg3_writephy(tp, MII_BMCR, phy_control);
668         if (err != 0)
669                 return -EBUSY;
670
671         limit = 5000;
672         while (limit--) {
673                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
674                 if (err != 0)
675                         return -EBUSY;
676
677                 if ((phy_control & BMCR_RESET) == 0) {
678                         udelay(40);
679                         break;
680                 }
681                 udelay(10);
682         }
683         if (limit <= 0)
684                 return -EBUSY;
685
686         return 0;
687 }
688
689 static int tg3_wait_macro_done(struct tg3 *tp)
690 {
691         int limit = 100;
692
693         while (limit--) {
694                 u32 tmp32;
695
696                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
697                         if ((tmp32 & 0x1000) == 0)
698                                 break;
699                 }
700         }
701         if (limit <= 0)
702                 return -EBUSY;
703
704         return 0;
705 }
706
707 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
708 {
709         static const u32 test_pat[4][6] = {
710         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
711         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
712         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
713         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
714         };
715         int chan;
716
717         for (chan = 0; chan < 4; chan++) {
718                 int i;
719
720                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
721                              (chan * 0x2000) | 0x0200);
722                 tg3_writephy(tp, 0x16, 0x0002);
723
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
726                                      test_pat[chan][i]);
727
728                 tg3_writephy(tp, 0x16, 0x0202);
729                 if (tg3_wait_macro_done(tp)) {
730                         *resetp = 1;
731                         return -EBUSY;
732                 }
733
734                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
735                              (chan * 0x2000) | 0x0200);
736                 tg3_writephy(tp, 0x16, 0x0082);
737                 if (tg3_wait_macro_done(tp)) {
738                         *resetp = 1;
739                         return -EBUSY;
740                 }
741
742                 tg3_writephy(tp, 0x16, 0x0802);
743                 if (tg3_wait_macro_done(tp)) {
744                         *resetp = 1;
745                         return -EBUSY;
746                 }
747
748                 for (i = 0; i < 6; i += 2) {
749                         u32 low, high;
750
751                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
752                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
753                             tg3_wait_macro_done(tp)) {
754                                 *resetp = 1;
755                                 return -EBUSY;
756                         }
757                         low &= 0x7fff;
758                         high &= 0x000f;
759                         if (low != test_pat[chan][i] ||
760                             high != test_pat[chan][i+1]) {
761                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
762                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
763                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
764
765                                 return -EBUSY;
766                         }
767                 }
768         }
769
770         return 0;
771 }
772
773 static int tg3_phy_reset_chanpat(struct tg3 *tp)
774 {
775         int chan;
776
777         for (chan = 0; chan < 4; chan++) {
778                 int i;
779
780                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
781                              (chan * 0x2000) | 0x0200);
782                 tg3_writephy(tp, 0x16, 0x0002);
783                 for (i = 0; i < 6; i++)
784                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
785                 tg3_writephy(tp, 0x16, 0x0202);
786                 if (tg3_wait_macro_done(tp))
787                         return -EBUSY;
788         }
789
790         return 0;
791 }
792
793 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
794 {
795         u32 reg32, phy9_orig;
796         int retries, do_phy_reset, err;
797
798         retries = 10;
799         do_phy_reset = 1;
800         do {
801                 if (do_phy_reset) {
802                         err = tg3_bmcr_reset(tp);
803                         if (err)
804                                 return err;
805                         do_phy_reset = 0;
806                 }
807
808                 /* Disable transmitter and interrupt.  */
809                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
810                         continue;
811
812                 reg32 |= 0x3000;
813                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
814
815                 /* Set full-duplex, 1000 mbps.  */
816                 tg3_writephy(tp, MII_BMCR,
817                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
818
819                 /* Set to master mode.  */
820                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
821                         continue;
822
823                 tg3_writephy(tp, MII_TG3_CTRL,
824                              (MII_TG3_CTRL_AS_MASTER |
825                               MII_TG3_CTRL_ENABLE_AS_MASTER));
826
827                 /* Enable SM_DSP_CLOCK and 6dB.  */
828                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
829
830                 /* Block the PHY control access.  */
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
832                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
833
834                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
835                 if (!err)
836                         break;
837         } while (--retries);
838
839         err = tg3_phy_reset_chanpat(tp);
840         if (err)
841                 return err;
842
843         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
844         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
845
846         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
847         tg3_writephy(tp, 0x16, 0x0000);
848
849         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
850             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
851                 /* Set Extended packet length bit for jumbo frames */
852                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
853         }
854         else {
855                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
856         }
857
858         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
859
860         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
861                 reg32 &= ~0x3000;
862                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
863         } else if (!err)
864                 err = -EBUSY;
865
866         return err;
867 }
868
869 /* Reset the tigon3 PHY and apply any chip-specific PHY workarounds
870  * afterwards.
871  */
872 static int tg3_phy_reset(struct tg3 *tp)
873 {
874         u32 phy_status;
875         int err;
876
877         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
878         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
879         if (err != 0)
880                 return -EBUSY;
881
882         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
883             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
884             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
885                 err = tg3_phy_reset_5703_4_5(tp);
886                 if (err)
887                         return err;
888                 goto out;
889         }
890
891         err = tg3_bmcr_reset(tp);
892         if (err)
893                 return err;
894
895 out:
896         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
897                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
898                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
899                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
901                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
902                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
903         }
904         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
905                 tg3_writephy(tp, 0x1c, 0x8d68);
906                 tg3_writephy(tp, 0x1c, 0x8d68);
907         }
908         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
909                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
910                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
911                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
912                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
913                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
914                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
915                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
916                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
917         }
918         /* Set Extended packet length bit (bit 14) on all chips that */
919         /* support jumbo frames */
920         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
921                 /* Cannot do read-modify-write on 5401 */
922                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
923         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
924                 u32 phy_reg;
925
926                 /* Set bit 14 with read-modify-write to preserve other bits */
927                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
928                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
929                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
930         }
931
932         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
933          * jumbo frames transmission.
934          */
935         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
936                 u32 phy_reg;
937
938                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
939                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
940                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
941         }
942
943         tg3_phy_set_wirespeed(tp);
944         return 0;
945 }
946
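/* Juggle the GPIO-controlled auxiliary (Vaux) power lines.  Skipped when
 * TG3_FLAG_EEPROM_WRITE_PROT is set; on 5704 the peer function's WOL and
 * init-complete state is consulted before the GPIOs are changed.
 */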
947 static void tg3_frob_aux_power(struct tg3 *tp)
948 {
949         struct tg3 *tp_peer = tp;
950
951         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
952                 return;
953
954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
955                 tp_peer = pci_get_drvdata(tp->pdev_peer);
956                 if (!tp_peer)
957                         BUG();
958         }
959
960
961         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
962             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
963                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
964                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
965                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
966                              (GRC_LCLCTRL_GPIO_OE0 |
967                               GRC_LCLCTRL_GPIO_OE1 |
968                               GRC_LCLCTRL_GPIO_OE2 |
969                               GRC_LCLCTRL_GPIO_OUTPUT0 |
970                               GRC_LCLCTRL_GPIO_OUTPUT1));
971                         udelay(100);
972                 } else {
973                         u32 no_gpio2;
974                         u32 grc_local_ctrl;
975
976                         if (tp_peer != tp &&
977                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
978                                 return;
979
980                         /* On 5753 and variants, GPIO2 cannot be used. */
981                         no_gpio2 = tp->nic_sram_data_cfg &
982                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
983
984                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
985                                          GRC_LCLCTRL_GPIO_OE1 |
986                                          GRC_LCLCTRL_GPIO_OE2 |
987                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
988                                          GRC_LCLCTRL_GPIO_OUTPUT2;
989                         if (no_gpio2) {
990                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
991                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
992                         }
993                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
994                                                 grc_local_ctrl);
995                         udelay(100);
996
997                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
998
999                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1000                                                 grc_local_ctrl);
1001                         udelay(100);
1002
1003                         if (!no_gpio2) {
1004                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1005                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1006                                        grc_local_ctrl);
1007                                 udelay(100);
1008                         }
1009                 }
1010         } else {
1011                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1012                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1013                         if (tp_peer != tp &&
1014                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1015                                 return;
1016
1017                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1018                              (GRC_LCLCTRL_GPIO_OE1 |
1019                               GRC_LCLCTRL_GPIO_OUTPUT1));
1020                         udelay(100);
1021
1022                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1023                              (GRC_LCLCTRL_GPIO_OE1));
1024                         udelay(100);
1025
1026                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1027                              (GRC_LCLCTRL_GPIO_OE1 |
1028                               GRC_LCLCTRL_GPIO_OUTPUT1));
1029                         udelay(100);
1030                 }
1031         }
1032 }
1033
1034 static int tg3_setup_phy(struct tg3 *, int);
1035
1036 #define RESET_KIND_SHUTDOWN     0
1037 #define RESET_KIND_INIT         1
1038 #define RESET_KIND_SUSPEND      2
1039
1040 static void tg3_write_sig_post_reset(struct tg3 *, int);
1041 static int tg3_halt_cpu(struct tg3 *, u32);
1042
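/* Transition the chip to the requested PCI power state (0 = D0 ... 3 = D3).
 * For the low-power states this also drops a copper link to 10Mb/half,
 * programs the MAC for Wake-on-LAN when enabled, gates the core clocks,
 * and finally writes the PM control register in PCI config space.
 */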
1043 static int tg3_set_power_state(struct tg3 *tp, int state)
1044 {
1045         u32 misc_host_ctrl;
1046         u16 power_control, power_caps;
1047         int pm = tp->pm_cap;
1048
1049         /* Make sure register accesses (indirect or otherwise)
1050          * will function correctly.
1051          */
1052         pci_write_config_dword(tp->pdev,
1053                                TG3PCI_MISC_HOST_CTRL,
1054                                tp->misc_host_ctrl);
1055
1056         pci_read_config_word(tp->pdev,
1057                              pm + PCI_PM_CTRL,
1058                              &power_control);
1059         power_control |= PCI_PM_CTRL_PME_STATUS;
1060         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1061         switch (state) {
1062         case 0:
1063                 power_control |= 0;
1064                 pci_write_config_word(tp->pdev,
1065                                       pm + PCI_PM_CTRL,
1066                                       power_control);
1067                 udelay(100);    /* Delay after power state change */
1068
1069                 /* Switch out of Vaux if it is not a LOM */
1070                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1071                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1072                         udelay(100);
1073                 }
1074
1075                 return 0;
1076
1077         case 1:
1078                 power_control |= 1;
1079                 break;
1080
1081         case 2:
1082                 power_control |= 2;
1083                 break;
1084
1085         case 3:
1086                 power_control |= 3;
1087                 break;
1088
1089         default:
1090                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1091                        "requested.\n",
1092                        tp->dev->name, state);
1093                 return -EINVAL;
1094         };
1095
1096         power_control |= PCI_PM_CTRL_PME_ENABLE;
1097
1098         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1099         tw32(TG3PCI_MISC_HOST_CTRL,
1100              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1101
1102         if (tp->link_config.phy_is_low_power == 0) {
1103                 tp->link_config.phy_is_low_power = 1;
1104                 tp->link_config.orig_speed = tp->link_config.speed;
1105                 tp->link_config.orig_duplex = tp->link_config.duplex;
1106                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1107         }
1108
1109         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1110                 tp->link_config.speed = SPEED_10;
1111                 tp->link_config.duplex = DUPLEX_HALF;
1112                 tp->link_config.autoneg = AUTONEG_ENABLE;
1113                 tg3_setup_phy(tp, 0);
1114         }
1115
1116         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1117
1118         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1119                 u32 mac_mode;
1120
1121                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1122                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1123                         udelay(40);
1124
1125                         mac_mode = MAC_MODE_PORT_MODE_MII;
1126
1127                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1128                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1129                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1130                 } else {
1131                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1132                 }
1133
1134                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1135                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1136
1137                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1138                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1139                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1140
1141                 tw32_f(MAC_MODE, mac_mode);
1142                 udelay(100);
1143
1144                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1145                 udelay(10);
1146         }
1147
1148         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1149             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1150              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1151                 u32 base_val;
1152
1153                 base_val = tp->pci_clock_ctrl;
1154                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1155                              CLOCK_CTRL_TXCLK_DISABLE);
1156
1157                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1158                      CLOCK_CTRL_ALTCLK |
1159                      CLOCK_CTRL_PWRDOWN_PLL133);
1160                 udelay(40);
1161         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1162                 /* do nothing */
1163         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1164                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1165                 u32 newbits1, newbits2;
1166
1167                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1168                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1169                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1170                                     CLOCK_CTRL_TXCLK_DISABLE |
1171                                     CLOCK_CTRL_ALTCLK);
1172                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1173                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1174                         newbits1 = CLOCK_CTRL_625_CORE;
1175                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1176                 } else {
1177                         newbits1 = CLOCK_CTRL_ALTCLK;
1178                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1179                 }
1180
1181                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1182                 udelay(40);
1183
1184                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1185                 udelay(40);
1186
1187                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1188                         u32 newbits3;
1189
1190                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1191                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1192                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1193                                             CLOCK_CTRL_TXCLK_DISABLE |
1194                                             CLOCK_CTRL_44MHZ_CORE);
1195                         } else {
1196                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1197                         }
1198
1199                         tw32_f(TG3PCI_CLOCK_CTRL,
1200                                          tp->pci_clock_ctrl | newbits3);
1201                         udelay(40);
1202                 }
1203         }
1204
1205         tg3_frob_aux_power(tp);
1206
1207         /* Workaround for unstable PLL clock */
1208         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1209             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1210                 u32 val = tr32(0x7d00);
1211
1212                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1213                 tw32(0x7d00, val);
1214                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1215                         tg3_halt_cpu(tp, RX_CPU_BASE);
1216         }
1217
1218         /* Finally, set the new power state. */
1219         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1220         udelay(100);    /* Delay after power state change */
1221
1222         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1223
1224         return 0;
1225 }
1226
1227 static void tg3_link_report(struct tg3 *tp)
1228 {
1229         if (!netif_carrier_ok(tp->dev)) {
1230                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1231         } else {
1232                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1233                        tp->dev->name,
1234                        (tp->link_config.active_speed == SPEED_1000 ?
1235                         1000 :
1236                         (tp->link_config.active_speed == SPEED_100 ?
1237                          100 : 10)),
1238                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1239                         "full" : "half"));
1240
1241                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1242                        "%s for RX.\n",
1243                        tp->dev->name,
1244                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1245                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1246         }
1247 }
1248
1249 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1250 {
1251         u32 new_tg3_flags = 0;
1252         u32 old_rx_mode = tp->rx_mode;
1253         u32 old_tx_mode = tp->tx_mode;
1254
1255         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1256
1257                 /* Convert 1000BaseX flow control bits to 1000BaseT
1258                  * bits before resolving flow control.
1259                  */
1260                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1261                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1262                                        ADVERTISE_PAUSE_ASYM);
1263                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1264
1265                         if (local_adv & ADVERTISE_1000XPAUSE)
1266                                 local_adv |= ADVERTISE_PAUSE_CAP;
1267                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1268                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1269                         if (remote_adv & LPA_1000XPAUSE)
1270                                 remote_adv |= LPA_PAUSE_CAP;
1271                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1272                                 remote_adv |= LPA_PAUSE_ASYM;
1273                 }
1274
1275                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1276                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1277                                 if (remote_adv & LPA_PAUSE_CAP)
1278                                         new_tg3_flags |=
1279                                                 (TG3_FLAG_RX_PAUSE |
1280                                                 TG3_FLAG_TX_PAUSE);
1281                                 else if (remote_adv & LPA_PAUSE_ASYM)
1282                                         new_tg3_flags |=
1283                                                 (TG3_FLAG_RX_PAUSE);
1284                         } else {
1285                                 if (remote_adv & LPA_PAUSE_CAP)
1286                                         new_tg3_flags |=
1287                                                 (TG3_FLAG_RX_PAUSE |
1288                                                 TG3_FLAG_TX_PAUSE);
1289                         }
1290                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1291                         if ((remote_adv & LPA_PAUSE_CAP) &&
1292                         (remote_adv & LPA_PAUSE_ASYM))
1293                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1294                 }
1295
1296                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1297                 tp->tg3_flags |= new_tg3_flags;
1298         } else {
1299                 new_tg3_flags = tp->tg3_flags;
1300         }
1301
1302         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1303                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1304         else
1305                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1306
1307         if (old_rx_mode != tp->rx_mode) {
1308                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1309         }
1310         
1311         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1312                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1313         else
1314                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1315
1316         if (old_tx_mode != tp->tx_mode) {
1317                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1318         }
1319 }
1320
1321 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1322 {
1323         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1324         case MII_TG3_AUX_STAT_10HALF:
1325                 *speed = SPEED_10;
1326                 *duplex = DUPLEX_HALF;
1327                 break;
1328
1329         case MII_TG3_AUX_STAT_10FULL:
1330                 *speed = SPEED_10;
1331                 *duplex = DUPLEX_FULL;
1332                 break;
1333
1334         case MII_TG3_AUX_STAT_100HALF:
1335                 *speed = SPEED_100;
1336                 *duplex = DUPLEX_HALF;
1337                 break;
1338
1339         case MII_TG3_AUX_STAT_100FULL:
1340                 *speed = SPEED_100;
1341                 *duplex = DUPLEX_FULL;
1342                 break;
1343
1344         case MII_TG3_AUX_STAT_1000HALF:
1345                 *speed = SPEED_1000;
1346                 *duplex = DUPLEX_HALF;
1347                 break;
1348
1349         case MII_TG3_AUX_STAT_1000FULL:
1350                 *speed = SPEED_1000;
1351                 *duplex = DUPLEX_FULL;
1352                 break;
1353
1354         default:
1355                 *speed = SPEED_INVALID;
1356                 *duplex = DUPLEX_INVALID;
1357                 break;
1358         };
1359 }
1360
1361 static void tg3_phy_copper_begin(struct tg3 *tp)
1362 {
1363         u32 new_adv;
1364         int i;
1365
1366         if (tp->link_config.phy_is_low_power) {
1367                 /* Entering low power mode.  Disable gigabit and
1368                  * 100baseT advertisements.
1369                  */
1370                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1371
1372                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1373                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1374                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1375                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1376
1377                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1378         } else if (tp->link_config.speed == SPEED_INVALID) {
1379                 tp->link_config.advertising =
1380                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1381                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1382                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1383                          ADVERTISED_Autoneg | ADVERTISED_MII);
1384
1385                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1386                         tp->link_config.advertising &=
1387                                 ~(ADVERTISED_1000baseT_Half |
1388                                   ADVERTISED_1000baseT_Full);
1389
1390                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1391                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1392                         new_adv |= ADVERTISE_10HALF;
1393                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1394                         new_adv |= ADVERTISE_10FULL;
1395                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1396                         new_adv |= ADVERTISE_100HALF;
1397                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1398                         new_adv |= ADVERTISE_100FULL;
1399                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1400
1401                 if (tp->link_config.advertising &
1402                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1403                         new_adv = 0;
1404                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1405                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1406                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1407                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1408                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1409                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1410                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1411                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1412                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1413                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1414                 } else {
1415                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1416                 }
1417         } else {
1418                 /* Asking for a specific link mode. */
1419                 if (tp->link_config.speed == SPEED_1000) {
1420                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1421                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1422
1423                         if (tp->link_config.duplex == DUPLEX_FULL)
1424                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1425                         else
1426                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1427                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1428                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1429                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1430                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1431                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1432                 } else {
1433                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1434
1435                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1436                         if (tp->link_config.speed == SPEED_100) {
1437                                 if (tp->link_config.duplex == DUPLEX_FULL)
1438                                         new_adv |= ADVERTISE_100FULL;
1439                                 else
1440                                         new_adv |= ADVERTISE_100HALF;
1441                         } else {
1442                                 if (tp->link_config.duplex == DUPLEX_FULL)
1443                                         new_adv |= ADVERTISE_10FULL;
1444                                 else
1445                                         new_adv |= ADVERTISE_10HALF;
1446                         }
1447                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1448                 }
1449         }
1450
1451         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1452             tp->link_config.speed != SPEED_INVALID) {
1453                 u32 bmcr, orig_bmcr;
1454
1455                 tp->link_config.active_speed = tp->link_config.speed;
1456                 tp->link_config.active_duplex = tp->link_config.duplex;
1457
1458                 bmcr = 0;
1459                 switch (tp->link_config.speed) {
1460                 default:
1461                 case SPEED_10:
1462                         break;
1463
1464                 case SPEED_100:
1465                         bmcr |= BMCR_SPEED100;
1466                         break;
1467
1468                 case SPEED_1000:
1469                         bmcr |= TG3_BMCR_SPEED1000;
1470                         break;
1471                 }
1472
1473                 if (tp->link_config.duplex == DUPLEX_FULL)
1474                         bmcr |= BMCR_FULLDPLX;
1475
1476                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1477                     (bmcr != orig_bmcr)) {
1478                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1479                         for (i = 0; i < 1500; i++) {
1480                                 u32 tmp;
1481
1482                                 udelay(10);
1483                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1484                                     tg3_readphy(tp, MII_BMSR, &tmp))
1485                                         continue;
1486                                 if (!(tmp & BMSR_LSTATUS)) {
1487                                         udelay(40);
1488                                         break;
1489                                 }
1490                         }
1491                         tg3_writephy(tp, MII_BMCR, bmcr);
1492                         udelay(40);
1493                 }
1494         } else {
1495                 tg3_writephy(tp, MII_BMCR,
1496                              BMCR_ANENABLE | BMCR_ANRESTART);
1497         }
1498 }
1499
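/* Apply the BCM5401 DSP setup sequence via the DSP address/data ports:
 * turn off tap power management and set the extended packet length bit.
 * Returns non-zero if any of the PHY writes fail.
 */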
1500 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1501 {
1502         int err;
1503
1504         /* Turn off tap power management. */
1505         /* Set Extended packet length bit */
1506         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1507
1508         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1509         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1510
1511         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1512         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1513
1514         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1515         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1516
1517         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1518         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1519
1520         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1521         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1522
1523         udelay(40);
1524
1525         return err;
1526 }
1527
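/* Return 1 if the PHY is currently advertising every 10/100 mode
 * (and both gigabit modes unless the device is 10/100 only),
 * 0 otherwise or if the advertisement registers cannot be read.
 */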
1528 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1529 {
1530         u32 adv_reg, all_mask;
1531
1532         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1533                 return 0;
1534
1535         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1536                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1537         if ((adv_reg & all_mask) != all_mask)
1538                 return 0;
1539         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1540                 u32 tg3_ctrl;
1541
1542                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1543                         return 0;
1544
1545                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1546                             MII_TG3_CTRL_ADV_1000_FULL);
1547                 if ((tg3_ctrl & all_mask) != all_mask)
1548                         return 0;
1549         }
1550         return 1;
1551 }
1552
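/* Bring up the link on copper devices: clear pending MAC/PHY events,
 * optionally reset the PHY, poll BMSR for link, resolve the negotiated
 * speed/duplex and flow control, then program MAC_MODE to match and
 * report any carrier state change.
 */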
1553 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1554 {
1555         int current_link_up;
1556         u32 bmsr, dummy;
1557         u16 current_speed;
1558         u8 current_duplex;
1559         int i, err;
1560
1561         tw32(MAC_EVENT, 0);
1562
1563         tw32_f(MAC_STATUS,
1564              (MAC_STATUS_SYNC_CHANGED |
1565               MAC_STATUS_CFG_CHANGED |
1566               MAC_STATUS_MI_COMPLETION |
1567               MAC_STATUS_LNKSTATE_CHANGED));
1568         udelay(40);
1569
1570         tp->mi_mode = MAC_MI_MODE_BASE;
1571         tw32_f(MAC_MI_MODE, tp->mi_mode);
1572         udelay(80);
1573
1574         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1575
1576         /* Some third-party PHYs need to be reset on link going
1577          * down.
1578          */
1579         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1580              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1581              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1582             netif_carrier_ok(tp->dev)) {
1583                 tg3_readphy(tp, MII_BMSR, &bmsr);
1584                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1585                     !(bmsr & BMSR_LSTATUS))
1586                         force_reset = 1;
1587         }
1588         if (force_reset)
1589                 tg3_phy_reset(tp);
1590
1591         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1592                 tg3_readphy(tp, MII_BMSR, &bmsr);
1593                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1594                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1595                         bmsr = 0;
1596
1597                 if (!(bmsr & BMSR_LSTATUS)) {
1598                         err = tg3_init_5401phy_dsp(tp);
1599                         if (err)
1600                                 return err;
1601
1602                         tg3_readphy(tp, MII_BMSR, &bmsr);
1603                         for (i = 0; i < 1000; i++) {
1604                                 udelay(10);
1605                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1606                                     (bmsr & BMSR_LSTATUS)) {
1607                                         udelay(40);
1608                                         break;
1609                                 }
1610                         }
1611
1612                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1613                             !(bmsr & BMSR_LSTATUS) &&
1614                             tp->link_config.active_speed == SPEED_1000) {
1615                                 err = tg3_phy_reset(tp);
1616                                 if (!err)
1617                                         err = tg3_init_5401phy_dsp(tp);
1618                                 if (err)
1619                                         return err;
1620                         }
1621                 }
1622         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1623                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1624                 /* 5701 {A0,B0} CRC bug workaround */
1625                 tg3_writephy(tp, 0x15, 0x0a75);
1626                 tg3_writephy(tp, 0x1c, 0x8c68);
1627                 tg3_writephy(tp, 0x1c, 0x8d68);
1628                 tg3_writephy(tp, 0x1c, 0x8c68);
1629         }
1630
1631         /* Clear pending interrupts... */
1632         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1633         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1634
1635         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1636                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1637         else
1638                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1639
1640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1641             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1642                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1643                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1644                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1645                 else
1646                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1647         }
1648
1649         current_link_up = 0;
1650         current_speed = SPEED_INVALID;
1651         current_duplex = DUPLEX_INVALID;
1652
1653         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1654                 u32 val;
1655
1656                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1657                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1658                 if (!(val & (1 << 10))) {
1659                         val |= (1 << 10);
1660                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1661                         goto relink;
1662                 }
1663         }
1664
1665         bmsr = 0;
1666         for (i = 0; i < 100; i++) {
1667                 tg3_readphy(tp, MII_BMSR, &bmsr);
1668                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1669                     (bmsr & BMSR_LSTATUS))
1670                         break;
1671                 udelay(40);
1672         }
1673
1674         if (bmsr & BMSR_LSTATUS) {
1675                 u32 aux_stat, bmcr;
1676
1677                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1678                 for (i = 0; i < 2000; i++) {
1679                         udelay(10);
1680                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1681                             aux_stat)
1682                                 break;
1683                 }
1684
1685                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1686                                              &current_speed,
1687                                              &current_duplex);
1688
1689                 bmcr = 0;
1690                 for (i = 0; i < 200; i++) {
1691                         tg3_readphy(tp, MII_BMCR, &bmcr);
1692                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1693                                 continue;
1694                         if (bmcr && bmcr != 0x7fff)
1695                                 break;
1696                         udelay(10);
1697                 }
1698
1699                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1700                         if (bmcr & BMCR_ANENABLE) {
1701                                 current_link_up = 1;
1702
1703                                 /* Force autoneg restart if we are exiting
1704                                  * low power mode.
1705                                  */
1706                                 if (!tg3_copper_is_advertising_all(tp))
1707                                         current_link_up = 0;
1708                         } else {
1709                                 current_link_up = 0;
1710                         }
1711                 } else {
1712                         if (!(bmcr & BMCR_ANENABLE) &&
1713                             tp->link_config.speed == current_speed &&
1714                             tp->link_config.duplex == current_duplex) {
1715                                 current_link_up = 1;
1716                         } else {
1717                                 current_link_up = 0;
1718                         }
1719                 }
1720
1721                 tp->link_config.active_speed = current_speed;
1722                 tp->link_config.active_duplex = current_duplex;
1723         }
1724
1725         if (current_link_up == 1 &&
1726             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1727             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1728                 u32 local_adv, remote_adv;
1729
1730                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1731                         local_adv = 0;
1732                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1733
1734                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1735                         remote_adv = 0;
1736
1737                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1738
1739                 /* If we are not advertising full pause capability,
1740                  * something is wrong.  Bring the link down and reconfigure.
1741                  */
1742                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1743                         current_link_up = 0;
1744                 } else {
1745                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1746                 }
1747         }
1748 relink:
1749         if (current_link_up == 0) {
1750                 u32 tmp;
1751
1752                 tg3_phy_copper_begin(tp);
1753
1754                 tg3_readphy(tp, MII_BMSR, &tmp);
1755                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1756                     (tmp & BMSR_LSTATUS))
1757                         current_link_up = 1;
1758         }
1759
1760         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1761         if (current_link_up == 1) {
1762                 if (tp->link_config.active_speed == SPEED_100 ||
1763                     tp->link_config.active_speed == SPEED_10)
1764                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1765                 else
1766                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767         } else
1768                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1769
1770         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1771         if (tp->link_config.active_duplex == DUPLEX_HALF)
1772                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1773
1774         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1776                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1777                     (current_link_up == 1 &&
1778                      tp->link_config.active_speed == SPEED_10))
1779                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1780         } else {
1781                 if (current_link_up == 1)
1782                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1783         }
1784
1785         /* ??? Without this setting Netgear GA302T PHY does not
1786          * ??? send/receive packets...
1787          */
1788         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1789             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1790                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1791                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1792                 udelay(80);
1793         }
1794
1795         tw32_f(MAC_MODE, tp->mac_mode);
1796         udelay(40);
1797
1798         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1799                 /* Polled via timer. */
1800                 tw32_f(MAC_EVENT, 0);
1801         } else {
1802                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1803         }
1804         udelay(40);
1805
1806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1807             current_link_up == 1 &&
1808             tp->link_config.active_speed == SPEED_1000 &&
1809             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1810              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1811                 udelay(120);
1812                 tw32_f(MAC_STATUS,
1813                      (MAC_STATUS_SYNC_CHANGED |
1814                       MAC_STATUS_CFG_CHANGED));
1815                 udelay(40);
1816                 tg3_write_mem(tp,
1817                               NIC_SRAM_FIRMWARE_MBOX,
1818                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1819         }
1820
1821         if (current_link_up != netif_carrier_ok(tp->dev)) {
1822                 if (current_link_up)
1823                         netif_carrier_on(tp->dev);
1824                 else
1825                         netif_carrier_off(tp->dev);
1826                 tg3_link_report(tp);
1827         }
1828
1829         return 0;
1830 }
1831
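/* Software state for the autonegotiation state machine the driver runs
 * itself on fiber (TBI) links when hardware autoneg is not used; see
 * tg3_fiber_aneg_smachine() below.
 */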
1832 struct tg3_fiber_aneginfo {
1833         int state;
1834 #define ANEG_STATE_UNKNOWN              0
1835 #define ANEG_STATE_AN_ENABLE            1
1836 #define ANEG_STATE_RESTART_INIT         2
1837 #define ANEG_STATE_RESTART              3
1838 #define ANEG_STATE_DISABLE_LINK_OK      4
1839 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1840 #define ANEG_STATE_ABILITY_DETECT       6
1841 #define ANEG_STATE_ACK_DETECT_INIT      7
1842 #define ANEG_STATE_ACK_DETECT           8
1843 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1844 #define ANEG_STATE_COMPLETE_ACK         10
1845 #define ANEG_STATE_IDLE_DETECT_INIT     11
1846 #define ANEG_STATE_IDLE_DETECT          12
1847 #define ANEG_STATE_LINK_OK              13
1848 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1849 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1850
1851         u32 flags;
1852 #define MR_AN_ENABLE            0x00000001
1853 #define MR_RESTART_AN           0x00000002
1854 #define MR_AN_COMPLETE          0x00000004
1855 #define MR_PAGE_RX              0x00000008
1856 #define MR_NP_LOADED            0x00000010
1857 #define MR_TOGGLE_TX            0x00000020
1858 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1859 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1860 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1861 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1862 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1863 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1864 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1865 #define MR_TOGGLE_RX            0x00002000
1866 #define MR_NP_RX                0x00004000
1867
1868 #define MR_LINK_OK              0x80000000
1869
1870         unsigned long link_time, cur_time;
1871
1872         u32 ability_match_cfg;
1873         int ability_match_count;
1874
1875         char ability_match, idle_match, ack_match;
1876
1877         u32 txconfig, rxconfig;
1878 #define ANEG_CFG_NP             0x00000080
1879 #define ANEG_CFG_ACK            0x00000040
1880 #define ANEG_CFG_RF2            0x00000020
1881 #define ANEG_CFG_RF1            0x00000010
1882 #define ANEG_CFG_PS2            0x00000001
1883 #define ANEG_CFG_PS1            0x00008000
1884 #define ANEG_CFG_HD             0x00004000
1885 #define ANEG_CFG_FD             0x00002000
1886 #define ANEG_CFG_INVAL          0x00001f06
1887
1888 };
1889 #define ANEG_OK         0
1890 #define ANEG_DONE       1
1891 #define ANEG_TIMER_ENAB 2
1892 #define ANEG_FAILED     -1
1893
1894 #define ANEG_STATE_SETTLE_TIME  10000
1895
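/* Run one tick of the fiber autonegotiation state machine.  The caller
 * (see fiber_autoneg()) invokes this repeatedly until it returns
 * ANEG_DONE or ANEG_FAILED; ANEG_OK and ANEG_TIMER_ENAB mean keep
 * ticking.
 */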
1896 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1897                                    struct tg3_fiber_aneginfo *ap)
1898 {
1899         unsigned long delta;
1900         u32 rx_cfg_reg;
1901         int ret;
1902
1903         if (ap->state == ANEG_STATE_UNKNOWN) {
1904                 ap->rxconfig = 0;
1905                 ap->link_time = 0;
1906                 ap->cur_time = 0;
1907                 ap->ability_match_cfg = 0;
1908                 ap->ability_match_count = 0;
1909                 ap->ability_match = 0;
1910                 ap->idle_match = 0;
1911                 ap->ack_match = 0;
1912         }
1913         ap->cur_time++;
1914
1915         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1916                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1917
1918                 if (rx_cfg_reg != ap->ability_match_cfg) {
1919                         ap->ability_match_cfg = rx_cfg_reg;
1920                         ap->ability_match = 0;
1921                         ap->ability_match_count = 0;
1922                 } else {
1923                         if (++ap->ability_match_count > 1) {
1924                                 ap->ability_match = 1;
1925                                 ap->ability_match_cfg = rx_cfg_reg;
1926                         }
1927                 }
1928                 if (rx_cfg_reg & ANEG_CFG_ACK)
1929                         ap->ack_match = 1;
1930                 else
1931                         ap->ack_match = 0;
1932
1933                 ap->idle_match = 0;
1934         } else {
1935                 ap->idle_match = 1;
1936                 ap->ability_match_cfg = 0;
1937                 ap->ability_match_count = 0;
1938                 ap->ability_match = 0;
1939                 ap->ack_match = 0;
1940
1941                 rx_cfg_reg = 0;
1942         }
1943
1944         ap->rxconfig = rx_cfg_reg;
1945         ret = ANEG_OK;
1946
1947         switch(ap->state) {
1948         case ANEG_STATE_UNKNOWN:
1949                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1950                         ap->state = ANEG_STATE_AN_ENABLE;
1951
1952                 /* fallthru */
1953         case ANEG_STATE_AN_ENABLE:
1954                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1955                 if (ap->flags & MR_AN_ENABLE) {
1956                         ap->link_time = 0;
1957                         ap->cur_time = 0;
1958                         ap->ability_match_cfg = 0;
1959                         ap->ability_match_count = 0;
1960                         ap->ability_match = 0;
1961                         ap->idle_match = 0;
1962                         ap->ack_match = 0;
1963
1964                         ap->state = ANEG_STATE_RESTART_INIT;
1965                 } else {
1966                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1967                 }
1968                 break;
1969
1970         case ANEG_STATE_RESTART_INIT:
1971                 ap->link_time = ap->cur_time;
1972                 ap->flags &= ~(MR_NP_LOADED);
1973                 ap->txconfig = 0;
1974                 tw32(MAC_TX_AUTO_NEG, 0);
1975                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1976                 tw32_f(MAC_MODE, tp->mac_mode);
1977                 udelay(40);
1978
1979                 ret = ANEG_TIMER_ENAB;
1980                 ap->state = ANEG_STATE_RESTART;
1981
1982                 /* fallthru */
1983         case ANEG_STATE_RESTART:
1984                 delta = ap->cur_time - ap->link_time;
1985                 if (delta > ANEG_STATE_SETTLE_TIME) {
1986                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1987                 } else {
1988                         ret = ANEG_TIMER_ENAB;
1989                 }
1990                 break;
1991
1992         case ANEG_STATE_DISABLE_LINK_OK:
1993                 ret = ANEG_DONE;
1994                 break;
1995
1996         case ANEG_STATE_ABILITY_DETECT_INIT:
1997                 ap->flags &= ~(MR_TOGGLE_TX);
1998                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1999                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2000                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2001                 tw32_f(MAC_MODE, tp->mac_mode);
2002                 udelay(40);
2003
2004                 ap->state = ANEG_STATE_ABILITY_DETECT;
2005                 break;
2006
2007         case ANEG_STATE_ABILITY_DETECT:
2008                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2009                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2010                 }
2011                 break;
2012
2013         case ANEG_STATE_ACK_DETECT_INIT:
2014                 ap->txconfig |= ANEG_CFG_ACK;
2015                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2016                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2017                 tw32_f(MAC_MODE, tp->mac_mode);
2018                 udelay(40);
2019
2020                 ap->state = ANEG_STATE_ACK_DETECT;
2021
2022                 /* fallthru */
2023         case ANEG_STATE_ACK_DETECT:
2024                 if (ap->ack_match != 0) {
2025                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2026                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2027                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2028                         } else {
2029                                 ap->state = ANEG_STATE_AN_ENABLE;
2030                         }
2031                 } else if (ap->ability_match != 0 &&
2032                            ap->rxconfig == 0) {
2033                         ap->state = ANEG_STATE_AN_ENABLE;
2034                 }
2035                 break;
2036
2037         case ANEG_STATE_COMPLETE_ACK_INIT:
2038                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2039                         ret = ANEG_FAILED;
2040                         break;
2041                 }
2042                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2043                                MR_LP_ADV_HALF_DUPLEX |
2044                                MR_LP_ADV_SYM_PAUSE |
2045                                MR_LP_ADV_ASYM_PAUSE |
2046                                MR_LP_ADV_REMOTE_FAULT1 |
2047                                MR_LP_ADV_REMOTE_FAULT2 |
2048                                MR_LP_ADV_NEXT_PAGE |
2049                                MR_TOGGLE_RX |
2050                                MR_NP_RX);
2051                 if (ap->rxconfig & ANEG_CFG_FD)
2052                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2053                 if (ap->rxconfig & ANEG_CFG_HD)
2054                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2055                 if (ap->rxconfig & ANEG_CFG_PS1)
2056                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2057                 if (ap->rxconfig & ANEG_CFG_PS2)
2058                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2059                 if (ap->rxconfig & ANEG_CFG_RF1)
2060                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2061                 if (ap->rxconfig & ANEG_CFG_RF2)
2062                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2063                 if (ap->rxconfig & ANEG_CFG_NP)
2064                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2065
2066                 ap->link_time = ap->cur_time;
2067
2068                 ap->flags ^= (MR_TOGGLE_TX);
2069                 if (ap->rxconfig & 0x0008)
2070                         ap->flags |= MR_TOGGLE_RX;
2071                 if (ap->rxconfig & ANEG_CFG_NP)
2072                         ap->flags |= MR_NP_RX;
2073                 ap->flags |= MR_PAGE_RX;
2074
2075                 ap->state = ANEG_STATE_COMPLETE_ACK;
2076                 ret = ANEG_TIMER_ENAB;
2077                 break;
2078
2079         case ANEG_STATE_COMPLETE_ACK:
2080                 if (ap->ability_match != 0 &&
2081                     ap->rxconfig == 0) {
2082                         ap->state = ANEG_STATE_AN_ENABLE;
2083                         break;
2084                 }
2085                 delta = ap->cur_time - ap->link_time;
2086                 if (delta > ANEG_STATE_SETTLE_TIME) {
2087                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2088                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2089                         } else {
2090                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2091                                     !(ap->flags & MR_NP_RX)) {
2092                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2093                                 } else {
2094                                         ret = ANEG_FAILED;
2095                                 }
2096                         }
2097                 }
2098                 break;
2099
2100         case ANEG_STATE_IDLE_DETECT_INIT:
2101                 ap->link_time = ap->cur_time;
2102                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2103                 tw32_f(MAC_MODE, tp->mac_mode);
2104                 udelay(40);
2105
2106                 ap->state = ANEG_STATE_IDLE_DETECT;
2107                 ret = ANEG_TIMER_ENAB;
2108                 break;
2109
2110         case ANEG_STATE_IDLE_DETECT:
2111                 if (ap->ability_match != 0 &&
2112                     ap->rxconfig == 0) {
2113                         ap->state = ANEG_STATE_AN_ENABLE;
2114                         break;
2115                 }
2116                 delta = ap->cur_time - ap->link_time;
2117                 if (delta > ANEG_STATE_SETTLE_TIME) {
2118                         /* XXX another gem from the Broadcom driver :( */
2119                         ap->state = ANEG_STATE_LINK_OK;
2120                 }
2121                 break;
2122
2123         case ANEG_STATE_LINK_OK:
2124                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2125                 ret = ANEG_DONE;
2126                 break;
2127
2128         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2129                 /* ??? unimplemented */
2130                 break;
2131
2132         case ANEG_STATE_NEXT_PAGE_WAIT:
2133                 /* ??? unimplemented */
2134                 break;
2135
2136         default:
2137                 ret = ANEG_FAILED;
2138                 break;
2139         }
2140
2141         return ret;
2142 }
2143
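/* Drive tg3_fiber_aneg_smachine() to completion, polling in ~1us steps
 * for up to roughly 195ms.  Returns 1 if negotiation completes
 * successfully; the resulting MR_* flags are passed back in *flags
 * either way.
 */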
2144 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2145 {
2146         int res = 0;
2147         struct tg3_fiber_aneginfo aninfo;
2148         int status = ANEG_FAILED;
2149         unsigned int tick;
2150         u32 tmp;
2151
2152         tw32_f(MAC_TX_AUTO_NEG, 0);
2153
2154         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2155         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2156         udelay(40);
2157
2158         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2159         udelay(40);
2160
2161         memset(&aninfo, 0, sizeof(aninfo));
2162         aninfo.flags |= MR_AN_ENABLE;
2163         aninfo.state = ANEG_STATE_UNKNOWN;
2164         aninfo.cur_time = 0;
2165         tick = 0;
2166         while (++tick < 195000) {
2167                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2168                 if (status == ANEG_DONE || status == ANEG_FAILED)
2169                         break;
2170
2171                 udelay(1);
2172         }
2173
2174         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2175         tw32_f(MAC_MODE, tp->mac_mode);
2176         udelay(40);
2177
2178         *flags = aninfo.flags;
2179
2180         if (status == ANEG_DONE &&
2181             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2182                              MR_LP_ADV_FULL_DUPLEX)))
2183                 res = 1;
2184
2185         return res;
2186 }
2187
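/* Initialization sequence for the BCM8002 SerDes PHY: set the PLL lock
 * range, soft-reset the device, toggle POR and wait for the signal to
 * stabilize.  It runs on the first initialization, or later only when
 * PCS sync is present; otherwise it is skipped.
 */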
2188 static void tg3_init_bcm8002(struct tg3 *tp)
2189 {
2190         u32 mac_status = tr32(MAC_STATUS);
2191         int i;
2192
2193         /* Reset when initializing for the first time, or when we have a link. */
2194         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2195             !(mac_status & MAC_STATUS_PCS_SYNCED))
2196                 return;
2197
2198         /* Set PLL lock range. */
2199         tg3_writephy(tp, 0x16, 0x8007);
2200
2201         /* SW reset */
2202         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2203
2204         /* Wait for reset to complete. */
2205         /* XXX schedule_timeout() ... */
2206         for (i = 0; i < 500; i++)
2207                 udelay(10);
2208
2209         /* Config mode; select PMA/Ch 1 regs. */
2210         tg3_writephy(tp, 0x10, 0x8411);
2211
2212         /* Enable auto-lock and comdet, select txclk for tx. */
2213         tg3_writephy(tp, 0x11, 0x0a10);
2214
2215         tg3_writephy(tp, 0x18, 0x00a0);
2216         tg3_writephy(tp, 0x16, 0x41ff);
2217
2218         /* Assert and deassert POR. */
2219         tg3_writephy(tp, 0x13, 0x0400);
2220         udelay(40);
2221         tg3_writephy(tp, 0x13, 0x0000);
2222
2223         tg3_writephy(tp, 0x11, 0x0a50);
2224         udelay(40);
2225         tg3_writephy(tp, 0x11, 0x0a10);
2226
2227         /* Wait for signal to stabilize */
2228         /* XXX schedule_timeout() ... */
2229         for (i = 0; i < 15000; i++)
2230                 udelay(10);
2231
2232         /* Deselect the channel register so we can read the PHYID
2233          * later.
2234          */
2235         tg3_writephy(tp, 0x10, 0x8011);
2236 }
2237
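/* Link setup for fiber devices with hardware (SG_DIG) autonegotiation.
 * Programs SG_DIG_CTRL for pause-capable autoneg (with a MAC_SERDES_CFG
 * workaround on the affected chip revisions), waits for the negotiation
 * result and configures flow control.  Returns 1 if the link is up.
 */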
2238 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2239 {
2240         u32 sg_dig_ctrl, sg_dig_status;
2241         u32 serdes_cfg, expected_sg_dig_ctrl;
2242         int workaround, port_a;
2243         int current_link_up;
2244
2245         serdes_cfg = 0;
2246         expected_sg_dig_ctrl = 0;
2247         workaround = 0;
2248         port_a = 1;
2249         current_link_up = 0;
2250
2251         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2252             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2253                 workaround = 1;
2254                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2255                         port_a = 0;
2256
2257                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2258                 /* preserve bits 20-23 for voltage regulator */
2259                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2260         }
2261
2262         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2263
2264         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2265                 if (sg_dig_ctrl & (1 << 31)) {
2266                         if (workaround) {
2267                                 u32 val = serdes_cfg;
2268
2269                                 if (port_a)
2270                                         val |= 0xc010000;
2271                                 else
2272                                         val |= 0x4010000;
2273                                 tw32_f(MAC_SERDES_CFG, val);
2274                         }
2275                         tw32_f(SG_DIG_CTRL, 0x01388400);
2276                 }
2277                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2278                         tg3_setup_flow_control(tp, 0, 0);
2279                         current_link_up = 1;
2280                 }
2281                 goto out;
2282         }
2283
2284         /* Want auto-negotiation.  */
2285         expected_sg_dig_ctrl = 0x81388400;
2286
2287         /* Pause capability */
2288         expected_sg_dig_ctrl |= (1 << 11);
2289
2290         /* Asymmetric pause */
2291         expected_sg_dig_ctrl |= (1 << 12);
2292
2293         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2294                 if (workaround)
2295                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2296                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2297                 udelay(5);
2298                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2299
2300                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2301         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2302                                  MAC_STATUS_SIGNAL_DET)) {
2303                 int i;
2304
2305                 /* Give the link time to negotiate (~200ms) */
2306                 for (i = 0; i < 40000; i++) {
2307                         sg_dig_status = tr32(SG_DIG_STATUS);
2308                         if (sg_dig_status & (0x3))
2309                                 break;
2310                         udelay(5);
2311                 }
2312                 mac_status = tr32(MAC_STATUS);
2313
2314                 if ((sg_dig_status & (1 << 1)) &&
2315                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2316                         u32 local_adv, remote_adv;
2317
2318                         local_adv = ADVERTISE_PAUSE_CAP;
2319                         remote_adv = 0;
2320                         if (sg_dig_status & (1 << 19))
2321                                 remote_adv |= LPA_PAUSE_CAP;
2322                         if (sg_dig_status & (1 << 20))
2323                                 remote_adv |= LPA_PAUSE_ASYM;
2324
2325                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2326                         current_link_up = 1;
2327                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2328                 } else if (!(sg_dig_status & (1 << 1))) {
2329                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2330                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2331                         else {
2332                                 if (workaround) {
2333                                         u32 val = serdes_cfg;
2334
2335                                         if (port_a)
2336                                                 val |= 0xc010000;
2337                                         else
2338                                                 val |= 0x4010000;
2339
2340                                         tw32_f(MAC_SERDES_CFG, val);
2341                                 }
2342
2343                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2344                                 udelay(40);
2345
2346                                 /* Link parallel detection - link is up
2347                                  * only if we have PCS_SYNC and are not
2348                                  * receiving config code words. */
2349                                 mac_status = tr32(MAC_STATUS);
2350                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2351                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2352                                         tg3_setup_flow_control(tp, 0, 0);
2353                                         current_link_up = 1;
2354                                 }
2355                         }
2356                 }
2357         }
2358
2359 out:
2360         return current_link_up;
2361 }
2362
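/* Link setup for fiber devices without hardware autoneg: run the
 * software autoneg state machine when autonegotiation is enabled, or
 * force a 1000FD link otherwise.  Returns 1 if the link is up.
 */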
2363 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2364 {
2365         int current_link_up = 0;
2366
2367         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2368                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2369                 goto out;
2370         }
2371
2372         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2373                 u32 flags;
2374                 int i;
2375
2376                 if (fiber_autoneg(tp, &flags)) {
2377                         u32 local_adv, remote_adv;
2378
2379                         local_adv = ADVERTISE_PAUSE_CAP;
2380                         remote_adv = 0;
2381                         if (flags & MR_LP_ADV_SYM_PAUSE)
2382                                 remote_adv |= LPA_PAUSE_CAP;
2383                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2384                                 remote_adv |= LPA_PAUSE_ASYM;
2385
2386                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2387
2388                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2389                         current_link_up = 1;
2390                 }
2391                 for (i = 0; i < 30; i++) {
2392                         udelay(20);
2393                         tw32_f(MAC_STATUS,
2394                                (MAC_STATUS_SYNC_CHANGED |
2395                                 MAC_STATUS_CFG_CHANGED));
2396                         udelay(40);
2397                         if ((tr32(MAC_STATUS) &
2398                              (MAC_STATUS_SYNC_CHANGED |
2399                               MAC_STATUS_CFG_CHANGED)) == 0)
2400                                 break;
2401                 }
2402
2403                 mac_status = tr32(MAC_STATUS);
2404                 if (current_link_up == 0 &&
2405                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2406                     !(mac_status & MAC_STATUS_RCVD_CFG))
2407                         current_link_up = 1;
2408         } else {
2409                 /* Forcing 1000FD link up. */
2410                 current_link_up = 1;
2411                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2412
2413                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2414                 udelay(40);
2415         }
2416
2417 out:
2418         return current_link_up;
2419 }
2420
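/* Top-level link setup for fiber (TBI) devices.  Puts the MAC into TBI
 * port mode, initializes the BCM8002 PHY if present, runs hardware or
 * software autoneg, and updates the link LED and carrier state.
 * Always returns 0.
 */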
2421 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2422 {
2423         u32 orig_pause_cfg;
2424         u16 orig_active_speed;
2425         u8 orig_active_duplex;
2426         u32 mac_status;
2427         int current_link_up;
2428         int i;
2429
2430         orig_pause_cfg =
2431                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2432                                   TG3_FLAG_TX_PAUSE));
2433         orig_active_speed = tp->link_config.active_speed;
2434         orig_active_duplex = tp->link_config.active_duplex;
2435
2436         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2437             netif_carrier_ok(tp->dev) &&
2438             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2439                 mac_status = tr32(MAC_STATUS);
2440                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2441                                MAC_STATUS_SIGNAL_DET |
2442                                MAC_STATUS_CFG_CHANGED |
2443                                MAC_STATUS_RCVD_CFG);
2444                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2445                                    MAC_STATUS_SIGNAL_DET)) {
2446                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2447                                             MAC_STATUS_CFG_CHANGED));
2448                         return 0;
2449                 }
2450         }
2451
2452         tw32_f(MAC_TX_AUTO_NEG, 0);
2453
2454         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2455         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2456         tw32_f(MAC_MODE, tp->mac_mode);
2457         udelay(40);
2458
2459         if (tp->phy_id == PHY_ID_BCM8002)
2460                 tg3_init_bcm8002(tp);
2461
2462         /* Enable link change event even when serdes polling.  */
2463         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2464         udelay(40);
2465
2466         current_link_up = 0;
2467         mac_status = tr32(MAC_STATUS);
2468
2469         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2470                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2471         else
2472                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2473
2474         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2475         tw32_f(MAC_MODE, tp->mac_mode);
2476         udelay(40);
2477
2478         tp->hw_status->status =
2479                 (SD_STATUS_UPDATED |
2480                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2481
2482         for (i = 0; i < 100; i++) {
2483                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2484                                     MAC_STATUS_CFG_CHANGED));
2485                 udelay(5);
2486                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2487                                          MAC_STATUS_CFG_CHANGED)) == 0)
2488                         break;
2489         }
2490
2491         mac_status = tr32(MAC_STATUS);
2492         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2493                 current_link_up = 0;
2494                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2495                         tw32_f(MAC_MODE, (tp->mac_mode |
2496                                           MAC_MODE_SEND_CONFIGS));
2497                         udelay(1);
2498                         tw32_f(MAC_MODE, tp->mac_mode);
2499                 }
2500         }
2501
2502         if (current_link_up == 1) {
2503                 tp->link_config.active_speed = SPEED_1000;
2504                 tp->link_config.active_duplex = DUPLEX_FULL;
2505                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2506                                     LED_CTRL_LNKLED_OVERRIDE |
2507                                     LED_CTRL_1000MBPS_ON));
2508         } else {
2509                 tp->link_config.active_speed = SPEED_INVALID;
2510                 tp->link_config.active_duplex = DUPLEX_INVALID;
2511                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2512                                     LED_CTRL_LNKLED_OVERRIDE |
2513                                     LED_CTRL_TRAFFIC_OVERRIDE));
2514         }
2515
2516         if (current_link_up != netif_carrier_ok(tp->dev)) {
2517                 if (current_link_up)
2518                         netif_carrier_on(tp->dev);
2519                 else
2520                         netif_carrier_off(tp->dev);
2521                 tg3_link_report(tp);
2522         } else {
2523                 u32 now_pause_cfg =
2524                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2525                                          TG3_FLAG_TX_PAUSE);
2526                 if (orig_pause_cfg != now_pause_cfg ||
2527                     orig_active_speed != tp->link_config.active_speed ||
2528                     orig_active_duplex != tp->link_config.active_duplex)
2529                         tg3_link_report(tp);
2530         }
2531
2532         return 0;
2533 }
2534
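/* Link setup for SerDes PHYs that are managed through MII registers
 * (1000BASE-X advertisement bits in MII_ADVERTISE).  Handles autoneg,
 * forced mode and the parallel-detection flag, then updates MAC_MODE
 * and the carrier state.
 */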
2535 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2536 {
2537         int current_link_up, err = 0;
2538         u32 bmsr, bmcr;
2539         u16 current_speed;
2540         u8 current_duplex;
2541
2542         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2543         tw32_f(MAC_MODE, tp->mac_mode);
2544         udelay(40);
2545
2546         tw32(MAC_EVENT, 0);
2547
2548         tw32_f(MAC_STATUS,
2549              (MAC_STATUS_SYNC_CHANGED |
2550               MAC_STATUS_CFG_CHANGED |
2551               MAC_STATUS_MI_COMPLETION |
2552               MAC_STATUS_LNKSTATE_CHANGED));
2553         udelay(40);
2554
2555         if (force_reset)
2556                 tg3_phy_reset(tp);
2557
2558         current_link_up = 0;
2559         current_speed = SPEED_INVALID;
2560         current_duplex = DUPLEX_INVALID;
2561
2562         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2563         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2564
2565         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2566
2567         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2568             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2569                 /* do nothing, just check for link up at the end */
2570         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2571                 u32 adv, new_adv;
2572
2573                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2574                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2575                                   ADVERTISE_1000XPAUSE |
2576                                   ADVERTISE_1000XPSE_ASYM |
2577                                   ADVERTISE_SLCT);
2578
2579                 /* Always advertise symmetric PAUSE just like copper */
2580                 new_adv |= ADVERTISE_1000XPAUSE;
2581
2582                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2583                         new_adv |= ADVERTISE_1000XHALF;
2584                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2585                         new_adv |= ADVERTISE_1000XFULL;
2586
2587                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2588                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2589                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2590                         tg3_writephy(tp, MII_BMCR, bmcr);
2591
2592                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2593                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2594                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2595
2596                         return err;
2597                 }
2598         } else {
2599                 u32 new_bmcr;
2600
2601                 bmcr &= ~BMCR_SPEED1000;
2602                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2603
2604                 if (tp->link_config.duplex == DUPLEX_FULL)
2605                         new_bmcr |= BMCR_FULLDPLX;
2606
2607                 if (new_bmcr != bmcr) {
2608                         /* BMCR_SPEED1000 is a reserved bit that needs
2609                          * to be set on write.
2610                          */
2611                         new_bmcr |= BMCR_SPEED1000;
2612
2613                         /* Force a linkdown */
2614                         if (netif_carrier_ok(tp->dev)) {
2615                                 u32 adv;
2616
2617                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2618                                 adv &= ~(ADVERTISE_1000XFULL |
2619                                          ADVERTISE_1000XHALF |
2620                                          ADVERTISE_SLCT);
2621                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2622                                 tg3_writephy(tp, MII_BMCR, bmcr |
2623                                                            BMCR_ANRESTART |
2624                                                            BMCR_ANENABLE);
2625                                 udelay(10);
2626                                 netif_carrier_off(tp->dev);
2627                         }
2628                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2629                         bmcr = new_bmcr;
2630                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2631                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2632                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2633                 }
2634         }
2635
2636         if (bmsr & BMSR_LSTATUS) {
2637                 current_speed = SPEED_1000;
2638                 current_link_up = 1;
2639                 if (bmcr & BMCR_FULLDPLX)
2640                         current_duplex = DUPLEX_FULL;
2641                 else
2642                         current_duplex = DUPLEX_HALF;
2643
2644                 if (bmcr & BMCR_ANENABLE) {
2645                         u32 local_adv, remote_adv, common;
2646
2647                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2648                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2649                         common = local_adv & remote_adv;
2650                         if (common & (ADVERTISE_1000XHALF |
2651                                       ADVERTISE_1000XFULL)) {
2652                                 if (common & ADVERTISE_1000XFULL)
2653                                         current_duplex = DUPLEX_FULL;
2654                                 else
2655                                         current_duplex = DUPLEX_HALF;
2656
2657                                 tg3_setup_flow_control(tp, local_adv,
2658                                                        remote_adv);
2659                         }
2660                         else
2661                                 current_link_up = 0;
2662                 }
2663         }
2664
2665         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2666         if (tp->link_config.active_duplex == DUPLEX_HALF)
2667                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2668
2669         tw32_f(MAC_MODE, tp->mac_mode);
2670         udelay(40);
2671
2672         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2673
2674         tp->link_config.active_speed = current_speed;
2675         tp->link_config.active_duplex = current_duplex;
2676
2677         if (current_link_up != netif_carrier_ok(tp->dev)) {
2678                 if (current_link_up)
2679                         netif_carrier_on(tp->dev);
2680                 else {
2681                         netif_carrier_off(tp->dev);
2682                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2683                 }
2684                 tg3_link_report(tp);
2685         }
2686         return err;
2687 }
2688
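/* For MII SerDes devices: if autoneg is enabled but the link is down
 * while signal is detected and no config code words are being received,
 * fall back to a forced 1000FD link (parallel detection).  Once config
 * code words are seen again, autoneg is turned back on.
 */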
2689 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2690 {
2691         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2692                 /* Give autoneg time to complete. */
2693                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2694                 return;
2695         }
2696         if (!netif_carrier_ok(tp->dev) &&
2697             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2698                 u32 bmcr;
2699
2700                 tg3_readphy(tp, MII_BMCR, &bmcr);
2701                 if (bmcr & BMCR_ANENABLE) {
2702                         u32 phy1, phy2;
2703
2704                         /* Select shadow register 0x1f */
2705                         tg3_writephy(tp, 0x1c, 0x7c00);
2706                         tg3_readphy(tp, 0x1c, &phy1);
2707
2708                         /* Select expansion interrupt status register */
2709                         tg3_writephy(tp, 0x17, 0x0f01);
2710                         tg3_readphy(tp, 0x15, &phy2);
2711                         tg3_readphy(tp, 0x15, &phy2);
2712
2713                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2714                                 /* We have signal detect and not receiving
2715                                  * config code words, link is up by parallel
2716                                  * detection.
2717                                  */
2718
2719                                 bmcr &= ~BMCR_ANENABLE;
2720                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2721                                 tg3_writephy(tp, MII_BMCR, bmcr);
2722                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2723                         }
2724                 }
2725         }
2726         else if (netif_carrier_ok(tp->dev) &&
2727                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2728                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2729                 u32 phy2;
2730
2731                 /* Select expansion interrupt status register */
2732                 tg3_writephy(tp, 0x17, 0x0f01);
2733                 tg3_readphy(tp, 0x15, &phy2);
2734                 if (phy2 & 0x20) {
2735                         u32 bmcr;
2736
2737                         /* Config code words received, turn on autoneg. */
2738                         tg3_readphy(tp, MII_BMCR, &bmcr);
2739                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2740
2741                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2742
2743                 }
2744         }
2745 }
2746
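/* Dispatch link setup to the fiber, MII-SerDes or copper routine, then
 * set MAC_TX_LENGTHS for the resulting speed/duplex and, on chips
 * without TG3_FLG2_5705_PLUS, enable or disable statistics block
 * coalescing based on carrier state.
 */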
2747 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2748 {
2749         int err;
2750
2751         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2752                 err = tg3_setup_fiber_phy(tp, force_reset);
2753         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2754                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2755         } else {
2756                 err = tg3_setup_copper_phy(tp, force_reset);
2757         }
2758
2759         if (tp->link_config.active_speed == SPEED_1000 &&
2760             tp->link_config.active_duplex == DUPLEX_HALF)
2761                 tw32(MAC_TX_LENGTHS,
2762                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2763                       (6 << TX_LENGTHS_IPG_SHIFT) |
2764                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2765         else
2766                 tw32(MAC_TX_LENGTHS,
2767                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2768                       (6 << TX_LENGTHS_IPG_SHIFT) |
2769                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2770
2771         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2772                 if (netif_carrier_ok(tp->dev)) {
2773                         tw32(HOSTCC_STAT_COAL_TICKS,
2774                              tp->coal.stats_block_coalesce_usecs);
2775                 } else {
2776                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2777                 }
2778         }
2779
2780         return err;
2781 }
2782
2783 /* Tigon3 never reports partial packet sends.  So we do not
2784  * need special logic to handle SKBs that have not had all
2785  * of their frags sent yet, like SunGEM does.
2786  */
2787 static void tg3_tx(struct tg3 *tp)
2788 {
2789         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2790         u32 sw_idx = tp->tx_cons;
2791
2792         while (sw_idx != hw_idx) {
2793                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2794                 struct sk_buff *skb = ri->skb;
2795                 int i;
2796
2797                 if (unlikely(skb == NULL))
2798                         BUG();
2799
2800                 pci_unmap_single(tp->pdev,
2801                                  pci_unmap_addr(ri, mapping),
2802                                  skb_headlen(skb),
2803                                  PCI_DMA_TODEVICE);
2804
2805                 ri->skb = NULL;
2806
2807                 sw_idx = NEXT_TX(sw_idx);
2808
2809                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2810                         if (unlikely(sw_idx == hw_idx))
2811                                 BUG();
2812
2813                         ri = &tp->tx_buffers[sw_idx];
2814                         if (unlikely(ri->skb != NULL))
2815                                 BUG();
2816
2817                         pci_unmap_page(tp->pdev,
2818                                        pci_unmap_addr(ri, mapping),
2819                                        skb_shinfo(skb)->frags[i].size,
2820                                        PCI_DMA_TODEVICE);
2821
2822                         sw_idx = NEXT_TX(sw_idx);
2823                 }
2824
2825                 dev_kfree_skb(skb);
2826         }
2827
2828         tp->tx_cons = sw_idx;
2829
2830         if (netif_queue_stopped(tp->dev) &&
2831             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2832                 netif_wake_queue(tp->dev);
2833 }
2834
2835 /* Returns size of skb allocated or < 0 on error.
2836  *
2837  * We only need to fill in the address because the other members
2838  * of the RX descriptor are invariant, see tg3_init_rings.
2839  *
2840  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2841  * posting buffers we only dirty the first cache line of the RX
2842  * descriptor (containing the address).  Whereas for the RX status
2843  * buffers the cpu only reads the last cacheline of the RX descriptor
2844  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2845  */
2846 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2847                             int src_idx, u32 dest_idx_unmasked)
2848 {
2849         struct tg3_rx_buffer_desc *desc;
2850         struct ring_info *map, *src_map;
2851         struct sk_buff *skb;
2852         dma_addr_t mapping;
2853         int skb_size, dest_idx;
2854
2855         src_map = NULL;
2856         switch (opaque_key) {
2857         case RXD_OPAQUE_RING_STD:
2858                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2859                 desc = &tp->rx_std[dest_idx];
2860                 map = &tp->rx_std_buffers[dest_idx];
2861                 if (src_idx >= 0)
2862                         src_map = &tp->rx_std_buffers[src_idx];
2863                 skb_size = tp->rx_pkt_buf_sz;
2864                 break;
2865
2866         case RXD_OPAQUE_RING_JUMBO:
2867                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2868                 desc = &tp->rx_jumbo[dest_idx];
2869                 map = &tp->rx_jumbo_buffers[dest_idx];
2870                 if (src_idx >= 0)
2871                         src_map = &tp->rx_jumbo_buffers[src_idx];
2872                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2873                 break;
2874
2875         default:
2876                 return -EINVAL;
2877         };
2878
2879         /* Do not overwrite any of the map or rp information
2880          * until we are sure we can commit to a new buffer.
2881          *
2882          * Callers depend upon this behavior and assume that
2883          * we leave everything unchanged if we fail.
2884          */
2885         skb = dev_alloc_skb(skb_size);
2886         if (skb == NULL)
2887                 return -ENOMEM;
2888
2889         skb->dev = tp->dev;
2890         skb_reserve(skb, tp->rx_offset);
2891
2892         mapping = pci_map_single(tp->pdev, skb->data,
2893                                  skb_size - tp->rx_offset,
2894                                  PCI_DMA_FROMDEVICE);
2895
2896         map->skb = skb;
2897         pci_unmap_addr_set(map, mapping, mapping);
2898
2899         if (src_map != NULL)
2900                 src_map->skb = NULL;
2901
2902         desc->addr_hi = ((u64)mapping >> 32);
2903         desc->addr_lo = ((u64)mapping & 0xffffffff);
2904
2905         return skb_size;
2906 }
2907
2908 /* We only need to copy over the address because the other
2909  * members of the RX descriptor are invariant.  See notes above
2910  * tg3_alloc_rx_skb for full details.
2911  */
2912 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2913                            int src_idx, u32 dest_idx_unmasked)
2914 {
2915         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2916         struct ring_info *src_map, *dest_map;
2917         int dest_idx;
2918
2919         switch (opaque_key) {
2920         case RXD_OPAQUE_RING_STD:
2921                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2922                 dest_desc = &tp->rx_std[dest_idx];
2923                 dest_map = &tp->rx_std_buffers[dest_idx];
2924                 src_desc = &tp->rx_std[src_idx];
2925                 src_map = &tp->rx_std_buffers[src_idx];
2926                 break;
2927
2928         case RXD_OPAQUE_RING_JUMBO:
2929                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2930                 dest_desc = &tp->rx_jumbo[dest_idx];
2931                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2932                 src_desc = &tp->rx_jumbo[src_idx];
2933                 src_map = &tp->rx_jumbo_buffers[src_idx];
2934                 break;
2935
2936         default:
2937                 return;
2938         };
2939
2940         dest_map->skb = src_map->skb;
2941         pci_unmap_addr_set(dest_map, mapping,
2942                            pci_unmap_addr(src_map, mapping));
2943         dest_desc->addr_hi = src_desc->addr_hi;
2944         dest_desc->addr_lo = src_desc->addr_lo;
2945
2946         src_map->skb = NULL;
2947 }
2948
2949 #if TG3_VLAN_TAG_USED
2950 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2951 {
2952         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2953 }
2954 #endif
2955
2956 /* The RX ring scheme is composed of multiple rings which post fresh
2957  * buffers to the chip, and one special ring the chip uses to report
2958  * status back to the host.
2959  *
2960  * The special ring reports the status of received packets to the
2961  * host.  The chip does not write into the original descriptor the
2962  * RX buffer was obtained from.  The chip simply takes the original
2963  * descriptor as provided by the host, updates the status and length
2964  * field, then writes this into the next status ring entry.
2965  *
2966  * Each ring the host uses to post buffers to the chip is described
2967  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2968  * it is first placed into the on-chip RAM.  When the packet's length
2969  * is known, the chip walks down the TG3_BDINFO entries to select the
2970  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
2971  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
2972  *
2973  * The "separate ring for rx status" scheme may sound queer, but it makes
2974  * sense from a cache coherency perspective.  If only the host writes
2975  * to the buffer post rings, and only the chip writes to the rx status
2976  * rings, then cache lines never move beyond shared-modified state.
2977  * If both the host and chip were to write into the same ring, cache line
2978  * eviction could occur since both entities want it in an exclusive state.
2979  */
2980 static int tg3_rx(struct tg3 *tp, int budget)
2981 {
2982         u32 work_mask;
2983         u32 sw_idx = tp->rx_rcb_ptr;
2984         u16 hw_idx;
2985         int received;
2986
2987         hw_idx = tp->hw_status->idx[0].rx_producer;
2988         /*
2989          * We need to order the read of hw_idx and the read of
2990          * the opaque cookie.
2991          */
2992         rmb();
2993         work_mask = 0;
2994         received = 0;
2995         while (sw_idx != hw_idx && budget > 0) {
2996                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2997                 unsigned int len;
2998                 struct sk_buff *skb;
2999                 dma_addr_t dma_addr;
3000                 u32 opaque_key, desc_idx, *post_ptr;
3001
3002                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3003                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3004                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3005                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3006                                                   mapping);
3007                         skb = tp->rx_std_buffers[desc_idx].skb;
3008                         post_ptr = &tp->rx_std_ptr;
3009                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3010                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3011                                                   mapping);
3012                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3013                         post_ptr = &tp->rx_jumbo_ptr;
3014                 }
3015                 else {
3016                         goto next_pkt_nopost;
3017                 }
3018
3019                 work_mask |= opaque_key;
3020
3021                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3022                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3023                 drop_it:
3024                         tg3_recycle_rx(tp, opaque_key,
3025                                        desc_idx, *post_ptr);
3026                 drop_it_no_recycle:
3027                         /* Other statistics kept track of by card. */
3028                         tp->net_stats.rx_dropped++;
3029                         goto next_pkt;
3030                 }
3031
3032                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3033
3034                 if (len > RX_COPY_THRESHOLD &&
3035                     tp->rx_offset == 2) {
3036                         /* rx_offset != 2 iff this is a 5701 card running
3037                          * in PCI-X mode [see tg3_get_invariants()]
3038                          */
3039                         int skb_size;
3040
3041                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3042                                                     desc_idx, *post_ptr);
3043                         if (skb_size < 0)
3044                                 goto drop_it;
3045
3046                         pci_unmap_single(tp->pdev, dma_addr,
3047                                          skb_size - tp->rx_offset,
3048                                          PCI_DMA_FROMDEVICE);
3049
3050                         skb_put(skb, len);
3051                 } else {
3052                         struct sk_buff *copy_skb;
3053
3054                         tg3_recycle_rx(tp, opaque_key,
3055                                        desc_idx, *post_ptr);
3056
3057                         copy_skb = dev_alloc_skb(len + 2);
3058                         if (copy_skb == NULL)
3059                                 goto drop_it_no_recycle;
3060
3061                         copy_skb->dev = tp->dev;
3062                         skb_reserve(copy_skb, 2);
3063                         skb_put(copy_skb, len);
3064                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3065                         memcpy(copy_skb->data, skb->data, len);
3066                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3067
3068                         /* We'll reuse the original ring buffer. */
3069                         skb = copy_skb;
3070                 }
3071
3072                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3073                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3074                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3075                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3076                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3077                 else
3078                         skb->ip_summed = CHECKSUM_NONE;
3079
3080                 skb->protocol = eth_type_trans(skb, tp->dev);
3081 #if TG3_VLAN_TAG_USED
3082                 if (tp->vlgrp != NULL &&
3083                     desc->type_flags & RXD_FLAG_VLAN) {
3084                         tg3_vlan_rx(tp, skb,
3085                                     desc->err_vlan & RXD_VLAN_MASK);
3086                 } else
3087 #endif
3088                         netif_receive_skb(skb);
3089
3090                 tp->dev->last_rx = jiffies;
3091                 received++;
3092                 budget--;
3093
3094 next_pkt:
3095                 (*post_ptr)++;
3096 next_pkt_nopost:
3097                 sw_idx++;
3098                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3099
3100                 /* Refresh hw_idx to see if there is new work */
3101                 if (sw_idx == hw_idx) {
3102                         hw_idx = tp->hw_status->idx[0].rx_producer;
3103                         rmb();
3104                 }
3105         }
3106
3107         /* ACK the status ring. */
3108         tp->rx_rcb_ptr = sw_idx;
3109         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3110
3111         /* Refill RX ring(s). */
3112         if (work_mask & RXD_OPAQUE_RING_STD) {
3113                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3114                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3115                              sw_idx);
3116         }
3117         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3118                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3119                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3120                              sw_idx);
3121         }
3122         mmiowb();
3123
3124         return received;
3125 }
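/* For illustration, the post/return handshake described above boils down to
 * roughly the sketch below.  It is a simplified, non-compiled restatement of
 * tg3_rx(); the helper name is illustrative and not part of the driver.
 */
#if 0
static void tg3_rx_sketch(struct tg3 *tp)
{
	u32 sw_idx = tp->rx_rcb_ptr;
	u32 hw_idx = tp->hw_status->idx[0].rx_producer;

	rmb();		/* order the index read vs. the descriptor reads below */

	while (sw_idx != hw_idx) {
		/* tp->rx_rcb[sw_idx] carries the opaque cookie naming the
		 * posting ring (std or jumbo) and the slot the buffer came
		 * from.  On success the SKB goes up the stack and a fresh
		 * buffer is posted; on error the old buffer is recycled.
		 */
		sw_idx = (sw_idx + 1) % TG3_RX_RCB_RING_SIZE(tp);
	}

	/* Tell the chip how far the return ring has been consumed, then
	 * bump the posting ring producer mailboxes so the refilled
	 * buffers become visible to the hardware.
	 */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
}
#endif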
3126
3127 static int tg3_poll(struct net_device *netdev, int *budget)
3128 {
3129         struct tg3 *tp = netdev_priv(netdev);
3130         struct tg3_hw_status *sblk = tp->hw_status;
3131         int done;
3132
3133         /* handle link change and other phy events */
3134         if (!(tp->tg3_flags &
3135               (TG3_FLAG_USE_LINKCHG_REG |
3136                TG3_FLAG_POLL_SERDES))) {
3137                 if (sblk->status & SD_STATUS_LINK_CHG) {
3138                         sblk->status = SD_STATUS_UPDATED |
3139                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3140                         spin_lock(&tp->lock);
3141                         tg3_setup_phy(tp, 0);
3142                         spin_unlock(&tp->lock);
3143                 }
3144         }
3145
3146         /* run TX completion thread */
3147         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3148                 spin_lock(&tp->tx_lock);
3149                 tg3_tx(tp);
3150                 spin_unlock(&tp->tx_lock);
3151         }
3152
3153         /* run RX thread, within the bounds set by NAPI.
3154          * All RX "locking" is done by ensuring outside
3155          * code synchronizes with dev->poll()
3156          */
3157         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3158                 int orig_budget = *budget;
3159                 int work_done;
3160
3161                 if (orig_budget > netdev->quota)
3162                         orig_budget = netdev->quota;
3163
3164                 work_done = tg3_rx(tp, orig_budget);
3165
3166                 *budget -= work_done;
3167                 netdev->quota -= work_done;
3168         }
3169
3170         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3171                 tp->last_tag = sblk->status_tag;
3172         rmb();
3173         sblk->status &= ~SD_STATUS_UPDATED;
3174
3175         /* if no more work, tell net stack and NIC we're done */
3176         done = !tg3_has_work(tp);
3177         if (done) {
3178                 spin_lock(&tp->lock);
3179                 netif_rx_complete(netdev);
3180                 tg3_restart_ints(tp);
3181                 spin_unlock(&tp->lock);
3182         }
3183
3184         return (done ? 0 : 1);
3185 }
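/* Under the dev->poll() NAPI contract of this kernel generation, tg3_poll()
 * consumes at most min(*budget, dev->quota) packets, decrements both
 * counters by the work actually done, and returns 0 only after calling
 * netif_rx_complete() (no more work); returning 1 keeps the device on the
 * poll list.
 */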
3186
3187 static void tg3_irq_quiesce(struct tg3 *tp)
3188 {
3189         BUG_ON(tp->irq_sync);
3190
3191         tp->irq_sync = 1;
3192         smp_mb();
3193
3194         synchronize_irq(tp->pdev->irq);
3195 }
3196
3197 static inline int tg3_irq_sync(struct tg3 *tp)
3198 {
3199         return tp->irq_sync;
3200 }
3201
3202 /* Fully shut down all tg3 driver activity elsewhere in the system.
3203  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3204  * with as well.  In practice this is only necessary when shutting
3205  * down the device.
3206  */
3207 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3208 {
3209         if (irq_sync)
3210                 tg3_irq_quiesce(tp);
3211         spin_lock_bh(&tp->lock);
3212         spin_lock(&tp->tx_lock);
3213 }
3214
3215 static inline void tg3_full_unlock(struct tg3 *tp)
3216 {
3217         spin_unlock(&tp->tx_lock);
3218         spin_unlock_bh(&tp->lock);
3219 }
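/* A typical reconfiguration path pairs these helpers as sketched below;
 * compare tg3_reset_task() and tg3_change_mtu() further down.  The snippet
 * is a non-compiled illustration and the helper name is hypothetical.
 */
#if 0
static void tg3_reconfigure_sketch(struct tg3 *tp)
{
	tg3_netif_stop(tp);		/* quiesce NAPI and the TX queue */
	tg3_full_lock(tp, 1);		/* 1 => also synchronize the IRQ handler */

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* ... change MTU, ring sizes, offload settings, etc. ... */
	tg3_init_hw(tp);

	tg3_netif_start(tp);
	tg3_full_unlock(tp);
}
#endif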
3220
3221 /* MSI ISR - No need to check for interrupt sharing and no need to
3222  * flush status block and interrupt mailbox. PCI ordering rules
3223  * guarantee that MSI will arrive after the status block.
3224  */
3225 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3226 {
3227         struct net_device *dev = dev_id;
3228         struct tg3 *tp = netdev_priv(dev);
3229         struct tg3_hw_status *sblk = tp->hw_status;
3230
3231         /*
3232          * Writing any value to intr-mbox-0 clears PCI INTA# and
3233          * chip-internal interrupt pending events.
3234          * Writing non-zero to intr-mbox-0 additionally tells the
3235          * NIC to stop sending us irqs, engaging "in-intr-handler"
3236          * event coalescing.
3237          */
3238         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3239         tp->last_tag = sblk->status_tag;
3240         rmb();
3241         if (tg3_irq_sync(tp))
3242                 goto out;
3243         sblk->status &= ~SD_STATUS_UPDATED;
3244         if (likely(tg3_has_work(tp)))
3245                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3246         else {
3247                 /* No work, re-enable interrupts.  */
3248                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3249                              tp->last_tag << 24);
3250         }
3251 out:
3252         return IRQ_RETVAL(1);
3253 }
3254
3255 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3256 {
3257         struct net_device *dev = dev_id;
3258         struct tg3 *tp = netdev_priv(dev);
3259         struct tg3_hw_status *sblk = tp->hw_status;
3260         unsigned int handled = 1;
3261
3262         /* In INTx mode, the interrupt may arrive at the CPU before the
3263          * status block posted prior to the interrupt is visible.
3264          * Reading the PCI State register will confirm whether the
3265          * interrupt is ours and will flush the status block.
3266          */
3267         if ((sblk->status & SD_STATUS_UPDATED) ||
3268             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3269                 /*
3270                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3271                  * chip-internal interrupt pending events.
3272          * Writing non-zero to intr-mbox-0 additionally tells the
3273                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3274                  * event coalescing.
3275                  */
3276                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3277                              0x00000001);
3278                 if (tg3_irq_sync(tp))
3279                         goto out;
3280                 sblk->status &= ~SD_STATUS_UPDATED;
3281                 if (likely(tg3_has_work(tp)))
3282                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3283                 else {
3284                         /* No work, shared interrupt perhaps?  re-enable
3285                          * interrupts, and flush that PCI write
3286                          */
3287                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3288                                 0x00000000);
3289                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3290                 }
3291         } else {        /* shared interrupt */
3292                 handled = 0;
3293         }
3294 out:
3295         return IRQ_RETVAL(handled);
3296 }
3297
3298 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3299 {
3300         struct net_device *dev = dev_id;
3301         struct tg3 *tp = netdev_priv(dev);
3302         struct tg3_hw_status *sblk = tp->hw_status;
3303         unsigned int handled = 1;
3304
3305         /* In INTx mode, the interrupt may arrive at the CPU before the
3306          * status block posted prior to the interrupt is visible.
3307          * Reading the PCI State register will confirm whether the
3308          * interrupt is ours and will flush the status block.
3309          */
3310         if ((sblk->status & SD_STATUS_UPDATED) ||
3311             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3312                 /*
3313          * Writing any value to intr-mbox-0 clears PCI INTA# and
3314          * chip-internal interrupt pending events.
3315          * Writing non-zero to intr-mbox-0 additionally tells the
3316                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3317                  * event coalescing.
3318                  */
3319                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3320                              0x00000001);
3321                 tp->last_tag = sblk->status_tag;
3322                 rmb();
3323                 if (tg3_irq_sync(tp))
3324                         goto out;
3325                 sblk->status &= ~SD_STATUS_UPDATED;
3326                 if (likely(tg3_has_work(tp)))
3327                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3328                 else {
3329                         /* no work, shared interrupt perhaps?  re-enable
3330                          * interrupts, and flush that PCI write
3331                          */
3332                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3333                                      tp->last_tag << 24);
3334                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3335                 }
3336         } else {        /* shared interrupt */
3337                 handled = 0;
3338         }
3339 out:
3340         return IRQ_RETVAL(handled);
3341 }
3342
3343 /* ISR for interrupt test */
3344 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3345                 struct pt_regs *regs)
3346 {
3347         struct net_device *dev = dev_id;
3348         struct tg3 *tp = netdev_priv(dev);
3349         struct tg3_hw_status *sblk = tp->hw_status;
3350
3351         if (sblk->status & SD_STATUS_UPDATED) {
3352                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3353                              0x00000001);
3354                 return IRQ_RETVAL(1);
3355         }
3356         return IRQ_RETVAL(0);
3357 }
3358
3359 static int tg3_init_hw(struct tg3 *);
3360 static int tg3_halt(struct tg3 *, int, int);
3361
3362 #ifdef CONFIG_NET_POLL_CONTROLLER
3363 static void tg3_poll_controller(struct net_device *dev)
3364 {
3365         struct tg3 *tp = netdev_priv(dev);
3366
3367         tg3_interrupt(tp->pdev->irq, dev, NULL);
3368 }
3369 #endif
3370
3371 static void tg3_reset_task(void *_data)
3372 {
3373         struct tg3 *tp = _data;
3374         unsigned int restart_timer;
3375
3376         tg3_netif_stop(tp);
3377
3378         tg3_full_lock(tp, 1);
3379
3380         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3381         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3382
3383         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3384         tg3_init_hw(tp);
3385
3386         tg3_netif_start(tp);
3387
3388         tg3_full_unlock(tp);
3389
3390         if (restart_timer)
3391                 mod_timer(&tp->timer, jiffies + 1);
3392 }
3393
3394 static void tg3_tx_timeout(struct net_device *dev)
3395 {
3396         struct tg3 *tp = netdev_priv(dev);
3397
3398         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3399                dev->name);
3400
3401         schedule_work(&tp->reset_task);
3402 }
3403
3404 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3405
3406 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3407                                        u32 guilty_entry, int guilty_len,
3408                                        u32 last_plus_one, u32 *start, u32 mss)
3409 {
3410         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3411         dma_addr_t new_addr;
3412         u32 entry = *start;
3413         int i;
3414
3415         if (!new_skb) {
3416                 dev_kfree_skb(skb);
3417                 return -1;
3418         }
3419
3420         /* New SKB is guaranteed to be linear. */
3421         entry = *start;
3422         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3423                                   PCI_DMA_TODEVICE);
3424         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3425                     (skb->ip_summed == CHECKSUM_HW) ?
3426                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3427         *start = NEXT_TX(entry);
3428
3429         /* Now clean up the sw ring entries. */
3430         i = 0;
3431         while (entry != last_plus_one) {
3432                 int len;
3433
3434                 if (i == 0)
3435                         len = skb_headlen(skb);
3436                 else
3437                         len = skb_shinfo(skb)->frags[i-1].size;
3438                 pci_unmap_single(tp->pdev,
3439                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3440                                  len, PCI_DMA_TODEVICE);
3441                 if (i == 0) {
3442                         tp->tx_buffers[entry].skb = new_skb;
3443                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3444                 } else {
3445                         tp->tx_buffers[entry].skb = NULL;
3446                 }
3447                 entry = NEXT_TX(entry);
3448                 i++;
3449         }
3450
3451         dev_kfree_skb(skb);
3452
3453         return 0;
3454 }
3455
3456 static void tg3_set_txd(struct tg3 *tp, int entry,
3457                         dma_addr_t mapping, int len, u32 flags,
3458                         u32 mss_and_is_end)
3459 {
3460         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3461         int is_end = (mss_and_is_end & 0x1);
3462         u32 mss = (mss_and_is_end >> 1);
3463         u32 vlan_tag = 0;
3464
3465         if (is_end)
3466                 flags |= TXD_FLAG_END;
3467         if (flags & TXD_FLAG_VLAN) {
3468                 vlan_tag = flags >> 16;
3469                 flags &= 0xffff;
3470         }
3471         vlan_tag |= (mss << TXD_MSS_SHIFT);
3472
3473         txd->addr_hi = ((u64) mapping >> 32);
3474         txd->addr_lo = ((u64) mapping & 0xffffffff);
3475         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3476         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3477 }
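/* Callers pack the mss_and_is_end argument as (is_last_frag | (mss << 1));
 * see for example the (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)
 * encoding in tg3_start_xmit() below.
 */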
3478
3479 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3480 {
3481         u32 base = (u32) mapping & 0xffffffff;
3482
3483         return ((base > 0xffffdcc0) &&
3484                 (base + len + 8 < base));
3485 }
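/* Worked example of the test above: with mapping 0xffffff00 and len 0x200,
 * base = 0xffffff00 > 0xffffdcc0, and base + len + 8 = 0x100000108, which
 * truncates to 0x108 in 32-bit arithmetic, i.e. it wrapped below base, so
 * the buffer straddles a 4GB DMA boundary and the workaround path is taken.
 * The 0xffffdcc0 cutoff (0x2340 = 9024 bytes short of 4GB) presumably lets
 * mappings that are not within roughly one maximum-sized frame of the
 * boundary skip the full check cheaply.
 */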
3486
3487 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3488 {
3489         struct tg3 *tp = netdev_priv(dev);
3490         dma_addr_t mapping;
3491         unsigned int i;
3492         u32 len, entry, base_flags, mss;
3493         int would_hit_hwbug;
3494
3495         len = skb_headlen(skb);
3496
3497         /* No BH disabling for tx_lock here.  We are running in BH disabled
3498          * context and TX reclaim runs via tp->poll inside of a software
3499          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3500          * no IRQ context deadlocks to worry about either.  Rejoice!
3501          */
3502         if (!spin_trylock(&tp->tx_lock))
3503                 return NETDEV_TX_LOCKED; 
3504
3505         /* This is a hard error, log it. */
3506         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3507                 netif_stop_queue(dev);
3508                 spin_unlock(&tp->tx_lock);
3509                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3510                        dev->name);
3511                 return NETDEV_TX_BUSY;
3512         }
3513
3514         entry = tp->tx_prod;
3515         base_flags = 0;
3516         if (skb->ip_summed == CHECKSUM_HW)
3517                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3518 #if TG3_TSO_SUPPORT != 0
3519         mss = 0;
3520         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3521             (mss = skb_shinfo(skb)->tso_size) != 0) {
3522                 int tcp_opt_len, ip_tcp_len;
3523
3524                 if (skb_header_cloned(skb) &&
3525                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3526                         dev_kfree_skb(skb);
3527                         goto out_unlock;
3528                 }
3529
3530                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3531                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3532
3533                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3534                                TXD_FLAG_CPU_POST_DMA);
3535
3536                 skb->nh.iph->check = 0;
3537                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3538                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3539                         skb->h.th->check = 0;
3540                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3541                 }
3542                 else {
3543                         skb->h.th->check =
3544                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3545                                                    skb->nh.iph->daddr,
3546                                                    0, IPPROTO_TCP, 0);
3547                 }
3548
3549                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3550                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3551                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3552                                 int tsflags;
3553
3554                                 tsflags = ((skb->nh.iph->ihl - 5) +
3555                                            (tcp_opt_len >> 2));
3556                                 mss |= (tsflags << 11);
3557                         }
3558                 } else {
3559                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3560                                 int tsflags;
3561
3562                                 tsflags = ((skb->nh.iph->ihl - 5) +
3563                                            (tcp_opt_len >> 2));
3564                                 base_flags |= tsflags << 12;
3565                         }
3566                 }
3567         }
3568 #else
3569         mss = 0;
3570 #endif
3571 #if TG3_VLAN_TAG_USED
3572         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3573                 base_flags |= (TXD_FLAG_VLAN |
3574                                (vlan_tx_tag_get(skb) << 16));
3575 #endif
3576
3577         /* Queue skb data, a.k.a. the main skb fragment. */
3578         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3579
3580         tp->tx_buffers[entry].skb = skb;
3581         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3582
3583         would_hit_hwbug = 0;
3584
3585         if (tg3_4g_overflow_test(mapping, len))
3586                 would_hit_hwbug = entry + 1;
3587
3588         tg3_set_txd(tp, entry, mapping, len, base_flags,
3589                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3590
3591         entry = NEXT_TX(entry);
3592
3593         /* Now loop through additional data fragments, and queue them. */
3594         if (skb_shinfo(skb)->nr_frags > 0) {
3595                 unsigned int i, last;
3596
3597                 last = skb_shinfo(skb)->nr_frags - 1;
3598                 for (i = 0; i <= last; i++) {
3599                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3600
3601                         len = frag->size;
3602                         mapping = pci_map_page(tp->pdev,
3603                                                frag->page,
3604                                                frag->page_offset,
3605                                                len, PCI_DMA_TODEVICE);
3606
3607                         tp->tx_buffers[entry].skb = NULL;
3608                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3609
3610                         if (tg3_4g_overflow_test(mapping, len)) {
3611                                 /* Only one should match. */
3612                                 if (would_hit_hwbug)
3613                                         BUG();
3614                                 would_hit_hwbug = entry + 1;
3615                         }
3616
3617                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3618                                 tg3_set_txd(tp, entry, mapping, len,
3619                                             base_flags, (i == last)|(mss << 1));
3620                         else
3621                                 tg3_set_txd(tp, entry, mapping, len,
3622                                             base_flags, (i == last));
3623
3624                         entry = NEXT_TX(entry);
3625                 }
3626         }
3627
3628         if (would_hit_hwbug) {
3629                 u32 last_plus_one = entry;
3630                 u32 start;
3631                 unsigned int len = 0;
3632
3633                 would_hit_hwbug -= 1;
3634                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3635                 entry &= (TG3_TX_RING_SIZE - 1);
3636                 start = entry;
3637                 i = 0;
3638                 while (entry != last_plus_one) {
3639                         if (i == 0)
3640                                 len = skb_headlen(skb);
3641                         else
3642                                 len = skb_shinfo(skb)->frags[i-1].size;
3643
3644                         if (entry == would_hit_hwbug)
3645                                 break;
3646
3647                         i++;
3648                         entry = NEXT_TX(entry);
3649
3650                 }
3651
3652                 /* If the workaround fails due to memory/mapping
3653                  * failure, silently drop this packet.
3654                  */
3655                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3656                                                 entry, len,
3657                                                 last_plus_one,
3658                                                 &start, mss))
3659                         goto out_unlock;
3660
3661                 entry = start;
3662         }
3663
3664         /* Packets are ready, update Tx producer idx local and on card. */
3665         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3666
3667         tp->tx_prod = entry;
3668         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3669                 netif_stop_queue(dev);
3670
3671 out_unlock:
3672         mmiowb();
3673         spin_unlock(&tp->tx_lock);
3674
3675         dev->trans_start = jiffies;
3676
3677         return NETDEV_TX_OK;
3678 }
3679
3680 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3681                                int new_mtu)
3682 {
3683         dev->mtu = new_mtu;
3684
3685         if (new_mtu > ETH_DATA_LEN) {
3686                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3687                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3688                         ethtool_op_set_tso(dev, 0);
3689                 }
3690                 else
3691                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3692         } else {
3693                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3694                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3695                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3696         }
3697 }
3698
3699 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3700 {
3701         struct tg3 *tp = netdev_priv(dev);
3702
3703         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3704                 return -EINVAL;
3705
3706         if (!netif_running(dev)) {
3707                 /* We'll just catch it later when the
3708                  * device is up'd.
3709                  */
3710                 tg3_set_mtu(dev, tp, new_mtu);
3711                 return 0;
3712         }
3713
3714         tg3_netif_stop(tp);
3715
3716         tg3_full_lock(tp, 1);
3717
3718         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3719
3720         tg3_set_mtu(dev, tp, new_mtu);
3721
3722         tg3_init_hw(tp);
3723
3724         tg3_netif_start(tp);
3725
3726         tg3_full_unlock(tp);
3727
3728         return 0;
3729 }
3730
3731 /* Free up pending packets in all rx/tx rings.
3732  *
3733  * The chip has been shut down and the driver detached from
3734  * the networking, so no interrupts or new tx packets will
3735  * end up in the driver.  tp->{tx,}lock is not held and we are not
3736  * in an interrupt context and thus may sleep.
3737  */
3738 static void tg3_free_rings(struct tg3 *tp)
3739 {
3740         struct ring_info *rxp;
3741         int i;
3742
3743         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3744                 rxp = &tp->rx_std_buffers[i];
3745
3746                 if (rxp->skb == NULL)
3747                         continue;
3748                 pci_unmap_single(tp->pdev,
3749                                  pci_unmap_addr(rxp, mapping),
3750                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3751                                  PCI_DMA_FROMDEVICE);
3752                 dev_kfree_skb_any(rxp->skb);
3753                 rxp->skb = NULL;
3754         }
3755
3756         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3757                 rxp = &tp->rx_jumbo_buffers[i];
3758
3759                 if (rxp->skb == NULL)
3760                         continue;
3761                 pci_unmap_single(tp->pdev,
3762                                  pci_unmap_addr(rxp, mapping),
3763                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3764                                  PCI_DMA_FROMDEVICE);
3765                 dev_kfree_skb_any(rxp->skb);
3766                 rxp->skb = NULL;
3767         }
3768
3769         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3770                 struct tx_ring_info *txp;
3771                 struct sk_buff *skb;
3772                 int j;
3773
3774                 txp = &tp->tx_buffers[i];
3775                 skb = txp->skb;
3776
3777                 if (skb == NULL) {
3778                         i++;
3779                         continue;
3780                 }
3781
3782                 pci_unmap_single(tp->pdev,
3783                                  pci_unmap_addr(txp, mapping),
3784                                  skb_headlen(skb),
3785                                  PCI_DMA_TODEVICE);
3786                 txp->skb = NULL;
3787
3788                 i++;
3789
3790                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3791                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3792                         pci_unmap_page(tp->pdev,
3793                                        pci_unmap_addr(txp, mapping),
3794                                        skb_shinfo(skb)->frags[j].size,
3795                                        PCI_DMA_TODEVICE);
3796                         i++;
3797                 }
3798
3799                 dev_kfree_skb_any(skb);
3800         }
3801 }
3802
3803 /* Initialize tx/rx rings for packet processing.
3804  *
3805  * The chip has been shut down and the driver detached from
3806  * the networking, so no interrupts or new tx packets will
3807  * end up in the driver.  tp->{tx,}lock are held and thus
3808  * we may not sleep.
3809  */
3810 static void tg3_init_rings(struct tg3 *tp)
3811 {
3812         u32 i;
3813
3814         /* Free up all the SKBs. */
3815         tg3_free_rings(tp);
3816
3817         /* Zero out all descriptors. */
3818         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3819         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3820         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3821         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3822
3823         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3824         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3825             (tp->dev->mtu > ETH_DATA_LEN))
3826                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3827
3828         /* Initialize invariants of the rings, we only set this
3829          * stuff once.  This works because the card does not
3830          * write into the rx buffer posting rings.
3831          */
3832         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3833                 struct tg3_rx_buffer_desc *rxd;
3834
3835                 rxd = &tp->rx_std[i];
3836                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3837                         << RXD_LEN_SHIFT;
3838                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3839                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3840                                (i << RXD_OPAQUE_INDEX_SHIFT));
3841         }
3842
3843         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3844                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3845                         struct tg3_rx_buffer_desc *rxd;
3846
3847                         rxd = &tp->rx_jumbo[i];
3848                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3849                                 << RXD_LEN_SHIFT;
3850                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3851                                 RXD_FLAG_JUMBO;
3852                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3853                                (i << RXD_OPAQUE_INDEX_SHIFT));
3854                 }
3855         }
3856
3857         /* Now allocate fresh SKBs for each rx ring. */
3858         for (i = 0; i < tp->rx_pending; i++) {
3859                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3860                                      -1, i) < 0)
3861                         break;
3862         }
3863
3864         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3865                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3866                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3867                                              -1, i) < 0)
3868                                 break;
3869                 }
3870         }
3871 }
3872
3873 /*
3874  * Must not be invoked with interrupt sources disabled and
3875  * the hardware shutdown down.
3876  * the hardware shut down.
3877 static void tg3_free_consistent(struct tg3 *tp)
3878 {
3879         if (tp->rx_std_buffers) {
3880                 kfree(tp->rx_std_buffers);
3881                 tp->rx_std_buffers = NULL;
3882         }
3883         if (tp->rx_std) {
3884                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3885                                     tp->rx_std, tp->rx_std_mapping);
3886                 tp->rx_std = NULL;
3887         }
3888         if (tp->rx_jumbo) {
3889                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3890                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3891                 tp->rx_jumbo = NULL;
3892         }
3893         if (tp->rx_rcb) {
3894                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3895                                     tp->rx_rcb, tp->rx_rcb_mapping);
3896                 tp->rx_rcb = NULL;
3897         }
3898         if (tp->tx_ring) {
3899                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3900                         tp->tx_ring, tp->tx_desc_mapping);
3901                 tp->tx_ring = NULL;
3902         }
3903         if (tp->hw_status) {
3904                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3905                                     tp->hw_status, tp->status_mapping);
3906                 tp->hw_status = NULL;
3907         }
3908         if (tp->hw_stats) {
3909                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3910                                     tp->hw_stats, tp->stats_mapping);
3911                 tp->hw_stats = NULL;
3912         }
3913 }
3914
3915 /*
3916  * Must not be invoked with interrupt sources disabled and
3917  * the hardware shut down.  Can sleep.
3918  */
3919 static int tg3_alloc_consistent(struct tg3 *tp)
3920 {
3921         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3922                                       (TG3_RX_RING_SIZE +
3923                                        TG3_RX_JUMBO_RING_SIZE)) +
3924                                      (sizeof(struct tx_ring_info) *
3925                                       TG3_TX_RING_SIZE),
3926                                      GFP_KERNEL);
3927         if (!tp->rx_std_buffers)
3928                 return -ENOMEM;
3929
3930         memset(tp->rx_std_buffers, 0,
3931                (sizeof(struct ring_info) *
3932                 (TG3_RX_RING_SIZE +
3933                  TG3_RX_JUMBO_RING_SIZE)) +
3934                (sizeof(struct tx_ring_info) *
3935                 TG3_TX_RING_SIZE));
3936
3937         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3938         tp->tx_buffers = (struct tx_ring_info *)
3939                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3940
3941         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3942                                           &tp->rx_std_mapping);
3943         if (!tp->rx_std)
3944                 goto err_out;
3945
3946         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3947                                             &tp->rx_jumbo_mapping);
3948
3949         if (!tp->rx_jumbo)
3950                 goto err_out;
3951
3952         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3953                                           &tp->rx_rcb_mapping);
3954         if (!tp->rx_rcb)
3955                 goto err_out;
3956
3957         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3958                                            &tp->tx_desc_mapping);
3959         if (!tp->tx_ring)
3960                 goto err_out;
3961
3962         tp->hw_status = pci_alloc_consistent(tp->pdev,
3963                                              TG3_HW_STATUS_SIZE,
3964                                              &tp->status_mapping);
3965         if (!tp->hw_status)
3966                 goto err_out;
3967
3968         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3969                                             sizeof(struct tg3_hw_stats),
3970                                             &tp->stats_mapping);
3971         if (!tp->hw_stats)
3972                 goto err_out;
3973
3974         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3975         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3976
3977         return 0;
3978
3979 err_out:
3980         tg3_free_consistent(tp);
3981         return -ENOMEM;
3982 }
3983
3984 #define MAX_WAIT_CNT 1000
3985
3986 /* To stop a block, clear the enable bit and poll till it
3987  * clears.  tp->lock is held.
3988  */
3989 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3990 {
3991         unsigned int i;
3992         u32 val;
3993
3994         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3995                 switch (ofs) {
3996                 case RCVLSC_MODE:
3997                 case DMAC_MODE:
3998                 case MBFREE_MODE:
3999                 case BUFMGR_MODE:
4000                 case MEMARB_MODE:
4001                         /* We can't enable/disable these bits of the
4002                          * 5705/5750, just say success.
4003                          */
4004                         return 0;
4005
4006                 default:
4007                         break;
4008                 };
4009         }
4010
4011         val = tr32(ofs);
4012         val &= ~enable_bit;
4013         tw32_f(ofs, val);
4014
4015         for (i = 0; i < MAX_WAIT_CNT; i++) {
4016                 udelay(100);
4017                 val = tr32(ofs);
4018                 if ((val & enable_bit) == 0)
4019                         break;
4020         }
4021
4022         if (i == MAX_WAIT_CNT && !silent) {
4023                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4024                        "ofs=%lx enable_bit=%x\n",
4025                        ofs, enable_bit);
4026                 return -ENODEV;
4027         }
4028
4029         return 0;
4030 }
4031
4032 /* tp->lock is held. */
4033 static int tg3_abort_hw(struct tg3 *tp, int silent)
4034 {
4035         int i, err;
4036
4037         tg3_disable_ints(tp);
4038
4039         tp->rx_mode &= ~RX_MODE_ENABLE;
4040         tw32_f(MAC_RX_MODE, tp->rx_mode);
4041         udelay(10);
4042
4043         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4044         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4045         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4046         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4047         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4048         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4049
4050         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4051         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4052         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4053         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4054         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4055         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4056         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4057
4058         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4059         tw32_f(MAC_MODE, tp->mac_mode);
4060         udelay(40);
4061
4062         tp->tx_mode &= ~TX_MODE_ENABLE;
4063         tw32_f(MAC_TX_MODE, tp->tx_mode);
4064
4065         for (i = 0; i < MAX_WAIT_CNT; i++) {
4066                 udelay(100);
4067                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4068                         break;
4069         }
4070         if (i >= MAX_WAIT_CNT) {
4071                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4072                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4073                        tp->dev->name, tr32(MAC_TX_MODE));
4074                 err |= -ENODEV;
4075         }
4076
4077         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4078         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4079         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4080
4081         tw32(FTQ_RESET, 0xffffffff);
4082         tw32(FTQ_RESET, 0x00000000);
4083
4084         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4085         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4086
4087         if (tp->hw_status)
4088                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4089         if (tp->hw_stats)
4090                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4091
4092         return err;
4093 }
4094
4095 /* tp->lock is held. */
4096 static int tg3_nvram_lock(struct tg3 *tp)
4097 {
4098         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4099                 int i;
4100
4101                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4102                 for (i = 0; i < 8000; i++) {
4103                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4104                                 break;
4105                         udelay(20);
4106                 }
4107                 if (i == 8000)
4108                         return -ENODEV;
4109         }
4110         return 0;
4111 }
4112
4113 /* tp->lock is held. */
4114 static void tg3_nvram_unlock(struct tg3 *tp)
4115 {
4116         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4117                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4118 }
4119
4120 /* tp->lock is held. */
4121 static void tg3_enable_nvram_access(struct tg3 *tp)
4122 {
4123         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4124             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4125                 u32 nvaccess = tr32(NVRAM_ACCESS);
4126
4127                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4128         }
4129 }
4130
4131 /* tp->lock is held. */
4132 static void tg3_disable_nvram_access(struct tg3 *tp)
4133 {
4134         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4135             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4136                 u32 nvaccess = tr32(NVRAM_ACCESS);
4137
4138                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4139         }
4140 }
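/* The four NVRAM helpers above are meant to nest around an access roughly
 * as sketched below (non-compiled illustration; the helper name is
 * hypothetical, and tp->lock is assumed held as noted above):
 */
#if 0
static int tg3_nvram_access_sketch(struct tg3 *tp)
{
	if (tg3_nvram_lock(tp))		/* win the SWARB arbitration first */
		return -ENODEV;
	tg3_enable_nvram_access(tp);	/* a no-op except on 5750+ parts */

	/* ... perform the actual NVRAM read/write transactions here ... */

	tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);		/* release SWARB for the firmware */
	return 0;
}
#endif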
4141
4142 /* tp->lock is held. */
4143 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4144 {
4145         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4146                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4147                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4148
4149         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4150                 switch (kind) {
4151                 case RESET_KIND_INIT:
4152                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4153                                       DRV_STATE_START);
4154                         break;
4155
4156                 case RESET_KIND_SHUTDOWN:
4157                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4158                                       DRV_STATE_UNLOAD);
4159                         break;
4160
4161                 case RESET_KIND_SUSPEND:
4162                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4163                                       DRV_STATE_SUSPEND);
4164                         break;
4165
4166                 default:
4167                         break;
4168                 }
4169         }
4170 }
4171
4172 /* tp->lock is held. */
4173 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4174 {
4175         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4176                 switch (kind) {
4177                 case RESET_KIND_INIT:
4178                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4179                                       DRV_STATE_START_DONE);
4180                         break;
4181
4182                 case RESET_KIND_SHUTDOWN:
4183                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4184                                       DRV_STATE_UNLOAD_DONE);
4185                         break;
4186
4187                 default:
4188                         break;
4189                 }
4190         }
4191 }
4192
4193 /* tp->lock is held. */
4194 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4195 {
4196         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4197                 switch (kind) {
4198                 case RESET_KIND_INIT:
4199                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4200                                       DRV_STATE_START);
4201                         break;
4202
4203                 case RESET_KIND_SHUTDOWN:
4204                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4205                                       DRV_STATE_UNLOAD);
4206                         break;
4207
4208                 case RESET_KIND_SUSPEND:
4209                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4210                                       DRV_STATE_SUSPEND);
4211                         break;
4212
4213                 default:
4214                         break;
4215                 }
4216         }
4217 }
4218
4219 static void tg3_stop_fw(struct tg3 *);
4220
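/* Overview of tg3_chip_reset(): temporarily drop the 5701 register-write
 * workaround, request a core-clock reset via GRC_MISC_CFG, delay and
 * flush the posted write with a PCI config read, restore PCI/PCI-X/MSI
 * state, re-enable the memory arbiter, and (except on SUN 570X parts)
 * poll the firmware mailbox for ~MAGIC1, which signals that bootcode has
 * finished, before reprobing the ASF configuration.
 */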
4221 /* tp->lock is held. */
4222 static int tg3_chip_reset(struct tg3 *tp)
4223 {
4224         u32 val;
4225         u32 flags_save;
4226         int i;
4227
4228         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4229                 tg3_nvram_lock(tp);
4230
4231         /*
4232          * We must avoid the readl() that normally takes place.
4233          * It locks up machines, causes machine checks, and does
4234          * other fun things.  So, temporarily disable the 5701
4235          * hardware workaround while we do the reset.
4236          */
4237         flags_save = tp->tg3_flags;
4238         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
4239
4240         /* do the reset */
4241         val = GRC_MISC_CFG_CORECLK_RESET;
4242
4243         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4244                 if (tr32(0x7e2c) == 0x60) {
4245                         tw32(0x7e2c, 0x20);
4246                 }
4247                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4248                         tw32(GRC_MISC_CFG, (1 << 29));
4249                         val |= (1 << 29);
4250                 }
4251         }
4252
4253         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4254                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4255         tw32(GRC_MISC_CFG, val);
4256
4257         /* restore 5701 hardware bug workaround flag */
4258         tp->tg3_flags = flags_save;
4259
4260         /* Unfortunately, we have to delay before the PCI read back.
4261          * Some 575X chips will not even respond to a PCI cfg access
4262          * when the reset command is given to the chip.
4263          *
4264          * How do these hardware designers expect things to work
4265          * properly if the PCI write is posted for a long period
4266          * of time?  It is always necessary to have some method by
4267          * which a register read back can occur to push out the
4268          * write that performs the reset.
4269          *
4270          * For most tg3 variants the trick below works.
4271          * Ho hum...
4272          */
4273         udelay(120);
4274
4275         /* Flush PCI posted writes.  The normal MMIO registers
4276          * are inaccessible at this time so this is the only way
4277          * to do this reliably (actually, this is no longer the
4278          * case, see above).  I tried to use indirect register
4279          * read/write but this upset some 5701 variants.
4280          */
4281         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4282
4283         udelay(120);
4284
4285         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4286                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4287                         int i;
4288                         u32 cfg_val;
4289
4290                         /* Wait for link training to complete.  */
4291                         for (i = 0; i < 5000; i++)
4292                                 udelay(100);
4293
4294                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4295                         pci_write_config_dword(tp->pdev, 0xc4,
4296                                                cfg_val | (1 << 15));
4297                 }
4298                 /* Set PCIE max payload size and clear error status.  */
4299                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4300         }
4301
4302         /* Re-enable indirect register accesses. */
4303         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4304                                tp->misc_host_ctrl);
4305
4306         /* Set MAX PCI retry to zero. */
4307         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4308         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4309             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4310                 val |= PCISTATE_RETRY_SAME_DMA;
4311         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4312
4313         pci_restore_state(tp->pdev);
4314
4315         /* Make sure PCI-X relaxed ordering bit is clear. */
4316         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4317         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4318         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4319
4320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4321                 u32 val;
4322
4323                 /* Chip reset on 5780 will reset MSI enable bit,
4324                  * so we need to restore it.
4325                  */
4326                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4327                         u16 ctrl;
4328
4329                         pci_read_config_word(tp->pdev,
4330                                              tp->msi_cap + PCI_MSI_FLAGS,
4331                                              &ctrl);
4332                         pci_write_config_word(tp->pdev,
4333                                               tp->msi_cap + PCI_MSI_FLAGS,
4334                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4335                         val = tr32(MSGINT_MODE);
4336                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4337                 }
4338
4339                 val = tr32(MEMARB_MODE);
4340                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4341
4342         } else
4343                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4344
4345         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4346                 tg3_stop_fw(tp);
4347                 tw32(0x5000, 0x400);
4348         }
4349
4350         tw32(GRC_MODE, tp->grc_mode);
4351
4352         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4353                 u32 val = tr32(0xc4);
4354
4355                 tw32(0xc4, val | (1 << 15));
4356         }
4357
4358         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4360                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4361                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4362                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4363                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4364         }
4365
4366         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4367                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4368                 tw32_f(MAC_MODE, tp->mac_mode);
4369         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4370                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4371                 tw32_f(MAC_MODE, tp->mac_mode);
4372         } else
4373                 tw32_f(MAC_MODE, 0);
4374         udelay(40);
4375
4376         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4377                 /* Wait for firmware initialization to complete. */
4378                 for (i = 0; i < 100000; i++) {
4379                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4380                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4381                                 break;
4382                         udelay(10);
4383                 }
4384                 if (i >= 100000) {
4385                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4386                                "firmware will not restart magic=%08x\n",
4387                                tp->dev->name, val);
4388                         return -ENODEV;
4389                 }
4390         }
4391
4392         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4393             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4394                 u32 val = tr32(0x7c00);
4395
4396                 tw32(0x7c00, val | (1 << 25));
4397         }
4398
4399         /* Reprobe ASF enable state.  */
4400         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4401         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4402         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4403         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4404                 u32 nic_cfg;
4405
4406                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4407                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4408                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4409                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4410                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4411                 }
4412         }
4413
4414         return 0;
4415 }
4416
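/* tg3_stop_fw() pauses the ASF firmware: it writes FWCMD_NICDRV_PAUSE_FW
 * to the firmware command mailbox, raises RX CPU event bit 14, and waits
 * up to 100us for the firmware to acknowledge by clearing that bit.
 */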
4417 /* tp->lock is held. */
4418 static void tg3_stop_fw(struct tg3 *tp)
4419 {
4420         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4421                 u32 val;
4422                 int i;
4423
4424                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4425                 val = tr32(GRC_RX_CPU_EVENT);
4426                 val |= (1 << 14);
4427                 tw32(GRC_RX_CPU_EVENT, val);
4428
4429                 /* Wait for RX cpu to ACK the event.  */
4430                 for (i = 0; i < 100; i++) {
4431                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4432                                 break;
4433                         udelay(1);
4434                 }
4435         }
4436 }
4437
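/* Full halt path: stop the ASF firmware, post the pre-reset signature,
 * quiesce the MAC/DMA blocks (the tg3_abort_hw() result is ignored since
 * the chip is reset anyway), perform the chip reset, then post the legacy
 * and post-reset signatures.
 */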
4438 /* tp->lock is held. */
4439 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4440 {
4441         int err;
4442
4443         tg3_stop_fw(tp);
4444
4445         tg3_write_sig_pre_reset(tp, kind);
4446
4447         tg3_abort_hw(tp, silent);
4448         err = tg3_chip_reset(tp);
4449
4450         tg3_write_sig_legacy(tp, kind);
4451         tg3_write_sig_post_reset(tp, kind);
4452
4453         if (err)
4454                 return err;
4455
4456         return 0;
4457 }
4458
4459 #define TG3_FW_RELEASE_MAJOR    0x0
4460 #define TG3_FW_RELEASE_MINOR    0x0
4461 #define TG3_FW_RELEASE_FIX      0x0
4462 #define TG3_FW_START_ADDR       0x08000000
4463 #define TG3_FW_TEXT_ADDR        0x08000000
4464 #define TG3_FW_TEXT_LEN         0x9c0
4465 #define TG3_FW_RODATA_ADDR      0x080009c0
4466 #define TG3_FW_RODATA_LEN       0x60
4467 #define TG3_FW_DATA_ADDR        0x08000a40
4468 #define TG3_FW_DATA_LEN         0x20
4469 #define TG3_FW_SBSS_ADDR        0x08000a60
4470 #define TG3_FW_SBSS_LEN         0xc
4471 #define TG3_FW_BSS_ADDR         0x08000a70
4472 #define TG3_FW_BSS_LEN          0x10
4473
4474 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4475         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4476         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4477         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4478         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4479         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4480         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4481         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4482         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4483         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4484         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4485         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4486         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4487         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4488         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4489         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4490         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4491         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4492         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4493         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4494         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4495         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4496         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4497         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4498         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4499         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4500         0, 0, 0, 0, 0, 0,
4501         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4502         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4503         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4504         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4505         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4506         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4507         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4508         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4509         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4510         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4511         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4512         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4513         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4514         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4515         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4516         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4517         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4518         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4519         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4520         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4521         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4522         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4523         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4524         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4525         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4526         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4527         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4528         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4529         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4530         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4531         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4532         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4533         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4534         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4535         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4536         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4537         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4538         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4539         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4540         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4541         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4542         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4543         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4544         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4545         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4546         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4547         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4548         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4549         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4550         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4551         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4552         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4553         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4554         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4555         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4556         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4557         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4558         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4559         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4560         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4561         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4562         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4563         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4564         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4565         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4566 };
4567
4568 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4569         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4570         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4571         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4572         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4573         0x00000000
4574 };
4575
4576 #if 0 /* All zeros, don't eat up space with it. */
4577 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4578         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4579         0x00000000, 0x00000000, 0x00000000, 0x00000000
4580 };
4581 #endif
4582
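/* Each on-chip CPU has a 16KB scratch window in NIC memory (RX at 0x30000,
 * TX at 0x34000).  Firmware sections are copied there at offsets taken
 * from the low 16 bits of their link-time addresses.
 */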
4583 #define RX_CPU_SCRATCH_BASE     0x30000
4584 #define RX_CPU_SCRATCH_SIZE     0x04000
4585 #define TX_CPU_SCRATCH_BASE     0x34000
4586 #define TX_CPU_SCRATCH_SIZE     0x04000
4587
4588 /* tp->lock is held. */
4589 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4590 {
4591         int i;
4592
4593         if (offset == TX_CPU_BASE &&
4594             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4595                 BUG();
4596
4597         if (offset == RX_CPU_BASE) {
4598                 for (i = 0; i < 10000; i++) {
4599                         tw32(offset + CPU_STATE, 0xffffffff);
4600                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4601                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4602                                 break;
4603                 }
4604
4605                 tw32(offset + CPU_STATE, 0xffffffff);
4606                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4607                 udelay(10);
4608         } else {
4609                 for (i = 0; i < 10000; i++) {
4610                         tw32(offset + CPU_STATE, 0xffffffff);
4611                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4612                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4613                                 break;
4614                 }
4615         }
4616
4617         if (i >= 10000) {
4618                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4619                        "and %s CPU\n",
4620                        tp->dev->name,
4621                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4622                 return -ENODEV;
4623         }
4624         return 0;
4625 }
4626
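/* Firmware image descriptor consumed by tg3_load_firmware_cpu().  A NULL
 * section pointer means that section is simply zero-filled in scratch
 * memory.
 */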
4627 struct fw_info {
4628         unsigned int text_base;
4629         unsigned int text_len;
4630         u32 *text_data;
4631         unsigned int rodata_base;
4632         unsigned int rodata_len;
4633         u32 *rodata_data;
4634         unsigned int data_base;
4635         unsigned int data_len;
4636         u32 *data_data;
4637 };
4638
4639 /* tp->lock is held. */
4640 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4641                                  int cpu_scratch_size, struct fw_info *info)
4642 {
4643         int err, i;
4644         u32 orig_tg3_flags = tp->tg3_flags;
4645         void (*write_op)(struct tg3 *, u32, u32);
4646
4647         if (cpu_base == TX_CPU_BASE &&
4648             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4649                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4650                        "TX cpu firmware on %s which is 5705.\n",
4651                        tp->dev->name);
4652                 return -EINVAL;
4653         }
4654
4655         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4656                 write_op = tg3_write_mem;
4657         else
4658                 write_op = tg3_write_indirect_reg32;
4659
4660         /* Force use of PCI config space for indirect register
4661          * write calls.
4662          */
4663         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4664
4665         /* It is possible that bootcode is still loading at this point.
4666          * Get the nvram lock before halting the cpu.
4667          */
4668         tg3_nvram_lock(tp);
4669         err = tg3_halt_cpu(tp, cpu_base);
4670         tg3_nvram_unlock(tp);
4671         if (err)
4672                 goto out;
4673
4674         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4675                 write_op(tp, cpu_scratch_base + i, 0);
4676         tw32(cpu_base + CPU_STATE, 0xffffffff);
4677         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4678         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4679                 write_op(tp, (cpu_scratch_base +
4680                               (info->text_base & 0xffff) +
4681                               (i * sizeof(u32))),
4682                          (info->text_data ?
4683                           info->text_data[i] : 0));
4684         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4685                 write_op(tp, (cpu_scratch_base +
4686                               (info->rodata_base & 0xffff) +
4687                               (i * sizeof(u32))),
4688                          (info->rodata_data ?
4689                           info->rodata_data[i] : 0));
4690         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4691                 write_op(tp, (cpu_scratch_base +
4692                               (info->data_base & 0xffff) +
4693                               (i * sizeof(u32))),
4694                          (info->data_data ?
4695                           info->data_data[i] : 0));
4696
4697         err = 0;
4698
4699 out:
4700         tp->tg3_flags = orig_tg3_flags;
4701         return err;
4702 }
4703
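/* 5701 A0 workaround: load the same fixup image into both the RX and TX
 * CPU scratch areas, then release only the RX CPU; its PC is forced to
 * TG3_FW_TEXT_ADDR and re-checked up to five times before giving up.
 */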
4704 /* tp->lock is held. */
4705 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4706 {
4707         struct fw_info info;
4708         int err, i;
4709
4710         info.text_base = TG3_FW_TEXT_ADDR;
4711         info.text_len = TG3_FW_TEXT_LEN;
4712         info.text_data = &tg3FwText[0];
4713         info.rodata_base = TG3_FW_RODATA_ADDR;
4714         info.rodata_len = TG3_FW_RODATA_LEN;
4715         info.rodata_data = &tg3FwRodata[0];
4716         info.data_base = TG3_FW_DATA_ADDR;
4717         info.data_len = TG3_FW_DATA_LEN;
4718         info.data_data = NULL;
4719
4720         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4721                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4722                                     &info);
4723         if (err)
4724                 return err;
4725
4726         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4727                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4728                                     &info);
4729         if (err)
4730                 return err;
4731
4732         /* Now start up only the RX cpu. */
4733         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4734         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4735
4736         for (i = 0; i < 5; i++) {
4737                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4738                         break;
4739                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4740                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4741                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4742                 udelay(1000);
4743         }
4744         if (i >= 5) {
4745                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4746                        "to set RX CPU PC, is %08x should be %08x\n",
4747                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4748                        TG3_FW_TEXT_ADDR);
4749                 return -ENODEV;
4750         }
4751         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4752         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4753
4754         return 0;
4755 }
4756
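/* TSO firmware images follow.  The standard image is linked at
 * 0x08000000; the 5705 family needs its own smaller build (the
 * TG3_TSO5_* image) linked at 0x00010000.
 */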
4757 #if TG3_TSO_SUPPORT != 0
4758
4759 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4760 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4761 #define TG3_TSO_FW_RELEASE_FIX          0x0
4762 #define TG3_TSO_FW_START_ADDR           0x08000000
4763 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4764 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4765 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4766 #define TG3_TSO_FW_RODATA_LEN           0x60
4767 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4768 #define TG3_TSO_FW_DATA_LEN             0x30
4769 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4770 #define TG3_TSO_FW_SBSS_LEN             0x2c
4771 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4772 #define TG3_TSO_FW_BSS_LEN              0x894
4773
4774 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4775         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4776         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4777         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4778         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4779         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4780         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4781         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4782         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4783         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4784         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4785         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4786         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4787         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4788         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4789         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4790         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4791         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4792         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4793         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4794         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4795         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4796         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4797         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4798         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4799         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4800         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4801         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4802         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4803         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4804         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4805         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4806         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4807         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4808         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4809         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4810         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4811         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4812         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4813         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4814         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4815         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4816         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4817         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4818         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4819         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4820         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4821         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4822         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4823         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4824         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4825         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4826         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4827         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4828         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4829         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4830         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4831         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4832         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4833         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4834         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4835         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4836         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4837         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4838         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4839         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4840         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4841         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4842         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4843         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4844         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4845         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4846         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4847         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4848         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4849         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4850         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4851         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4852         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4853         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4854         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4855         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4856         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4857         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4858         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4859         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4860         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4861         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4862         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4863         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4864         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4865         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4866         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4867         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4868         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4869         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4870         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4871         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4872         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4873         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4874         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4875         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4876         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4877         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4878         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4879         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4880         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4881         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4882         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4883         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4884         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4885         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4886         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4887         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4888         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4889         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4890         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4891         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4892         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4893         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4894         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4895         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4896         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4897         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4898         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4899         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4900         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4901         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4902         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4903         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4904         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4905         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4906         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4907         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4908         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4909         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4910         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4911         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4912         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4913         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4914         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4915         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4916         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4917         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4918         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4919         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4920         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4921         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4922         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4923         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4924         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4925         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4926         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4927         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4928         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4929         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4930         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4931         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4932         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4933         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4934         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4935         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4936         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4937         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4938         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4939         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4940         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4941         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4942         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4943         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4944         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4945         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4946         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4947         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4948         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4949         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4950         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4951         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4952         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4953         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4954         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4955         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4956         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4957         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4958         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4959         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4960         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4961         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4962         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4963         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4964         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4965         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4966         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4967         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4968         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4969         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4970         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4971         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4972         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4973         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4974         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4975         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4976         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4977         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4978         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4979         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4980         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4981         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4982         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4983         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4984         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4985         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4986         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4987         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4988         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4989         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4990         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4991         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4992         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4993         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4994         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4995         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4996         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4997         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4998         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4999         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5000         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5001         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5002         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5003         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5004         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5005         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5006         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5007         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5008         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5009         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5010         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5011         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5012         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5013         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5014         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5015         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5016         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5017         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5018         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5019         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5020         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5021         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5022         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5023         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5024         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5025         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5026         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5027         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5028         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5029         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5030         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5031         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5032         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5033         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5034         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5035         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5036         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5037         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5038         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5039         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5040         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5041         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5042         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5043         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5044         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5045         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5046         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5047         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5048         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5049         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5050         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5051         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5052         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5053         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5054         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5055         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5056         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5057         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5058         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5059 };
5060
5061 static u32 tg3TsoFwRodata[] = {
5062         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5063         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5064         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5065         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5066         0x00000000,
5067 };
5068
5069 static u32 tg3TsoFwData[] = {
5070         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5071         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5072         0x00000000,
5073 };
5074
5075 /* 5705 needs a special version of the TSO firmware.  */
5076 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5077 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5078 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5079 #define TG3_TSO5_FW_START_ADDR          0x00010000
5080 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5081 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5082 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5083 #define TG3_TSO5_FW_RODATA_LEN          0x50
5084 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5085 #define TG3_TSO5_FW_DATA_LEN            0x20
5086 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5087 #define TG3_TSO5_FW_SBSS_LEN            0x28
5088 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5089 #define TG3_TSO5_FW_BSS_LEN             0x88
5090
5091 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5092         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5093         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5094         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5095         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5096         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5097         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5098         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5099         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5100         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5101         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5102         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5103         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5104         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5105         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5106         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5107         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5108         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5109         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5110         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5111         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5112         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5113         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5114         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5115         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5116         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5117         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5118         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5119         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5120         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5121         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5122         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5123         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5124         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5125         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5126         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5127         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5128         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5129         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5130         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5131         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5132         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5133         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5134         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5135         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5136         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5137         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5138         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5139         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5140         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5141         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5142         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5143         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5144         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5145         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5146         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5147         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5148         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5149         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5150         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5151         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5152         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5153         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5154         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5155         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5156         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5157         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5158         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5159         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5160         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5161         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5162         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5163         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5164         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5165         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5166         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5167         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5168         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5169         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5170         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5171         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5172         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5173         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5174         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5175         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5176         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5177         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5178         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5179         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5180         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5181         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5182         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5183         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5184         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5185         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5186         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5187         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5188         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5189         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5190         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5191         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5192         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5193         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5194         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5195         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5196         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5197         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5198         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5199         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5200         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5201         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5202         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5203         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5204         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5205         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5206         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5207         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5208         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5209         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5210         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5211         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5212         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5213         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5214         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5215         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5216         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5217         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5218         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5219         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5220         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5221         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5222         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5223         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5224         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5225         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5226         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5227         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5228         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5229         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5230         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5231         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5232         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5233         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5234         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5235         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5236         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5237         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5238         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5239         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5240         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5241         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5242         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5243         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5244         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5245         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5246         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5247         0x00000000, 0x00000000, 0x00000000,
5248 };
5249
5250 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5251         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5252         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5253         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5254         0x00000000, 0x00000000, 0x00000000,
5255 };
5256
5257 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5258         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5259         0x00000000, 0x00000000, 0x00000000,
5260 };
5261
5262 /* tp->lock is held. */
5263 static int tg3_load_tso_firmware(struct tg3 *tp)
5264 {
5265         struct fw_info info;
5266         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5267         int err, i;
5268
5269         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5270                 return 0;
5271
5272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5273                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5274                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5275                 info.text_data = &tg3Tso5FwText[0];
5276                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5277                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5278                 info.rodata_data = &tg3Tso5FwRodata[0];
5279                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5280                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5281                 info.data_data = &tg3Tso5FwData[0];
5282                 cpu_base = RX_CPU_BASE;
5283                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5284                 cpu_scratch_size = (info.text_len +
5285                                     info.rodata_len +
5286                                     info.data_len +
5287                                     TG3_TSO5_FW_SBSS_LEN +
5288                                     TG3_TSO5_FW_BSS_LEN);
5289         } else {
5290                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5291                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5292                 info.text_data = &tg3TsoFwText[0];
5293                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5294                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5295                 info.rodata_data = &tg3TsoFwRodata[0];
5296                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5297                 info.data_len = TG3_TSO_FW_DATA_LEN;
5298                 info.data_data = &tg3TsoFwData[0];
5299                 cpu_base = TX_CPU_BASE;
5300                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5301                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5302         }
5303
5304         err = tg3_load_firmware_cpu(tp, cpu_base,
5305                                     cpu_scratch_base, cpu_scratch_size,
5306                                     &info);
5307         if (err)
5308                 return err;
5309
5310         /* Now start up the cpu. */
5311         tw32(cpu_base + CPU_STATE, 0xffffffff);
5312         tw32_f(cpu_base + CPU_PC,    info.text_base);
5313
5314         for (i = 0; i < 5; i++) {
5315                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5316                         break;
5317                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5318                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5319                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5320                 udelay(1000);
5321         }
5322         if (i >= 5) {
5323                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set CPU PC "
5324                        "for %s: PC is %08x, expected %08x\n",
5325                        tp->dev->name, tr32(cpu_base + CPU_PC),
5326                        info.text_base);
5327                 return -ENODEV;
5328         }
5329         tw32(cpu_base + CPU_STATE, 0xffffffff);
5330         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5331         return 0;
5332 }
5333
5334 #endif /* TG3_TSO_SUPPORT != 0 */
5335
5336 /* tp->lock is held. */
5337 static void __tg3_set_mac_addr(struct tg3 *tp)
5338 {
5339         u32 addr_high, addr_low;
5340         int i;
5341
5342         addr_high = ((tp->dev->dev_addr[0] << 8) |
5343                      tp->dev->dev_addr[1]);
5344         addr_low = ((tp->dev->dev_addr[2] << 24) |
5345                     (tp->dev->dev_addr[3] << 16) |
5346                     (tp->dev->dev_addr[4] <<  8) |
5347                     (tp->dev->dev_addr[5] <<  0));
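        /* The chip provides four MAC address register pairs; program the
         * same station address into all of them.
         */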
5348         for (i = 0; i < 4; i++) {
5349                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5350                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5351         }
5352
5353         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5354             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5355                 for (i = 0; i < 12; i++) {
5356                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5357                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5358                 }
5359         }
5360
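        /* Seed the transmit backoff generator with the byte sum of the
         * station address.
         */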
5361         addr_high = (tp->dev->dev_addr[0] +
5362                      tp->dev->dev_addr[1] +
5363                      tp->dev->dev_addr[2] +
5364                      tp->dev->dev_addr[3] +
5365                      tp->dev->dev_addr[4] +
5366                      tp->dev->dev_addr[5]) &
5367                 TX_BACKOFF_SEED_MASK;
5368         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5369 }
5370
5371 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5372 {
5373         struct tg3 *tp = netdev_priv(dev);
5374         struct sockaddr *addr = p;
5375
5376         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5377
5378         spin_lock_bh(&tp->lock);
5379         __tg3_set_mac_addr(tp);
5380         spin_unlock_bh(&tp->lock);
5381
5382         return 0;
5383 }
5384
5385 /* tp->lock is held. */
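/* Fill in one TG3_BDINFO block in NIC SRAM: the host DMA address of the
 * ring (split into high and low halves), the maxlen/flags word and, on
 * pre-5705 chips, the ring's address in NIC SRAM.
 */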
5386 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5387                            dma_addr_t mapping, u32 maxlen_flags,
5388                            u32 nic_addr)
5389 {
5390         tg3_write_mem(tp,
5391                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5392                       ((u64) mapping >> 32));
5393         tg3_write_mem(tp,
5394                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5395                       ((u64) mapping & 0xffffffff));
5396         tg3_write_mem(tp,
5397                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5398                        maxlen_flags);
5399
5400         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5401                 tg3_write_mem(tp,
5402                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5403                               nic_addr);
5404 }
5405
5406 static void __tg3_set_rx_mode(struct net_device *);
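/* Program the host coalescing engine from the ethtool coalescing
 * parameters.  The per-interrupt tick registers and the statistics
 * block coalescing register are not touched on 5705_PLUS chips, and
 * statistics coalescing is disabled while the link is down.
 */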
5407 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5408 {
5409         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5410         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5411         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5412         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5413         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5414                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5415                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5416         }
5417         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5418         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5419         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5420                 u32 val = ec->stats_block_coalesce_usecs;
5421
5422                 if (!netif_carrier_ok(tp->dev))
5423                         val = 0;
5424
5425                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5426         }
5427 }
5428
5429 /* tp->lock is held. */
5430 static int tg3_reset_hw(struct tg3 *tp)
5431 {
5432         u32 val, rdmac_mode;
5433         int i, err, limit;
5434
5435         tg3_disable_ints(tp);
5436
5437         tg3_stop_fw(tp);
5438
5439         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5440
5441         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5442                 tg3_abort_hw(tp, 1);
5443         }
5444
5445         err = tg3_chip_reset(tp);
5446         if (err)
5447                 return err;
5448
5449         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5450
5451         /* This works around an issue with Athlon chipsets on
5452          * B3 tigon3 silicon.  This bit has no effect on any
5453          * other revision.  But do not set this on PCI Express
5454          * chips.
5455          */
5456         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5457                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5458         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5459
5460         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5461             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5462                 val = tr32(TG3PCI_PCISTATE);
5463                 val |= PCISTATE_RETRY_SAME_DMA;
5464                 tw32(TG3PCI_PCISTATE, val);
5465         }
5466
5467         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5468                 /* Enable some hw fixes.  */
5469                 val = tr32(TG3PCI_MSI_DATA);
5470                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5471                 tw32(TG3PCI_MSI_DATA, val);
5472         }
5473
5474         /* Descriptor ring init may make accesses to the
5475          * NIC SRAM area to setup the TX descriptors, so we
5476          * can only do this after the hardware has been
5477          * successfully reset.
5478          */
5479         tg3_init_rings(tp);
5480
5481         /* This value is determined during the probe time DMA
5482          * engine test, tg3_test_dma.
5483          */
5484         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5485
5486         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5487                           GRC_MODE_4X_NIC_SEND_RINGS |
5488                           GRC_MODE_NO_TX_PHDR_CSUM |
5489                           GRC_MODE_NO_RX_PHDR_CSUM);
5490         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5491         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5492                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5493         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5494                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5495
5496         tw32(GRC_MODE,
5497              tp->grc_mode |
5498              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5499
5500         /* Setup the timer prescaler register.  Clock is always 66MHz. */
5501         val = tr32(GRC_MISC_CFG);
5502         val &= ~0xff;
5503         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5504         tw32(GRC_MISC_CFG, val);
5505
5506         /* Initialize MBUF/DESC pool. */
5507         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5508                 /* Do nothing.  */
5509         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5510                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5511                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5512                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5513                 else
5514                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5515                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5516                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5517         }
5518 #if TG3_TSO_SUPPORT != 0
5519         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5520                 int fw_len;
5521
5522                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5523                           TG3_TSO5_FW_RODATA_LEN +
5524                           TG3_TSO5_FW_DATA_LEN +
5525                           TG3_TSO5_FW_SBSS_LEN +
5526                           TG3_TSO5_FW_BSS_LEN);
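                /* Round the TSO firmware footprint up to a 128-byte
                 * boundary and carve that much out of the front of the
                 * 5705 mbuf pool before sizing it.
                 */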
5527                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5528                 tw32(BUFMGR_MB_POOL_ADDR,
5529                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5530                 tw32(BUFMGR_MB_POOL_SIZE,
5531                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5532         }
5533 #endif
5534
5535         if (tp->dev->mtu <= ETH_DATA_LEN) {
5536                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5537                      tp->bufmgr_config.mbuf_read_dma_low_water);
5538                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5539                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5540                 tw32(BUFMGR_MB_HIGH_WATER,
5541                      tp->bufmgr_config.mbuf_high_water);
5542         } else {
5543                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5544                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5545                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5546                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5547                 tw32(BUFMGR_MB_HIGH_WATER,
5548                      tp->bufmgr_config.mbuf_high_water_jumbo);
5549         }
5550         tw32(BUFMGR_DMA_LOW_WATER,
5551              tp->bufmgr_config.dma_low_water);
5552         tw32(BUFMGR_DMA_HIGH_WATER,
5553              tp->bufmgr_config.dma_high_water);
5554
5555         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5556         for (i = 0; i < 2000; i++) {
5557                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5558                         break;
5559                 udelay(10);
5560         }
5561         if (i >= 2000) {
5562                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5563                        tp->dev->name);
5564                 return -ENODEV;
5565         }
5566
5567         /* Setup replenish threshold. */
5568         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5569
5570         /* Initialize TG3_BDINFO's at:
5571          *  RCVDBDI_STD_BD:     standard eth size rx ring
5572          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5573          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5574          *
5575          * like so:
5576          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5577          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5578          *                              ring attribute flags
5579          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5580          *
5581          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5582          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5583          *
5584          * The size of each ring is fixed in the firmware, but the location is
5585          * configurable.
5586          */
5587         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5588              ((u64) tp->rx_std_mapping >> 32));
5589         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5590              ((u64) tp->rx_std_mapping & 0xffffffff));
5591         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5592              NIC_SRAM_RX_BUFFER_DESC);
5593
5594         /* Don't even try to program the JUMBO/MINI buffer descriptor
5595          * configs on 5705.
5596          */
5597         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5598                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5599                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5600         } else {
5601                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5602                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5603
5604                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5605                      BDINFO_FLAGS_DISABLED);
5606
5607                 /* Setup replenish threshold. */
5608                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5609
5610                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5611                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5612                              ((u64) tp->rx_jumbo_mapping >> 32));
5613                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5614                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5615                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5616                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5617                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5618                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5619                 } else {
5620                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5621                              BDINFO_FLAGS_DISABLED);
5622                 }
5623
5624         }
5625
5626         /* There is only one send ring on 5705/5750, no need to explicitly
5627          * disable the others.
5628          */
5629         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5630                 /* Clear out send RCB ring in SRAM. */
5631                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5632                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5633                                       BDINFO_FLAGS_DISABLED);
5634         }
5635
5636         tp->tx_prod = 0;
5637         tp->tx_cons = 0;
5638         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5639         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5640
5641         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5642                        tp->tx_desc_mapping,
5643                        (TG3_TX_RING_SIZE <<
5644                         BDINFO_FLAGS_MAXLEN_SHIFT),
5645                        NIC_SRAM_TX_BUFFER_DESC);
5646
5647         /* There is only one receive return ring on 5705/5750, no need
5648          * to explicitly disable the others.
5649          */
5650         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5651                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5652                      i += TG3_BDINFO_SIZE) {
5653                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5654                                       BDINFO_FLAGS_DISABLED);
5655                 }
5656         }
5657
5658         tp->rx_rcb_ptr = 0;
5659         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5660
5661         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5662                        tp->rx_rcb_mapping,
5663                        (TG3_RX_RCB_RING_SIZE(tp) <<
5664                         BDINFO_FLAGS_MAXLEN_SHIFT),
5665                        0);
5666
5667         tp->rx_std_ptr = tp->rx_pending;
5668         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5669                      tp->rx_std_ptr);
5670
5671         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5672                                                 tp->rx_jumbo_pending : 0;
5673         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5674                      tp->rx_jumbo_ptr);
5675
5676         /* Initialize MAC address and backoff seed. */
5677         __tg3_set_mac_addr(tp);
5678
5679         /* MTU + ethernet header + FCS + optional VLAN tag */
5680         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5681
5682         /* The slot time is changed by tg3_setup_phy if we
5683          * run at gigabit with half duplex.
5684          */
5685         tw32(MAC_TX_LENGTHS,
5686              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5687              (6 << TX_LENGTHS_IPG_SHIFT) |
5688              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5689
5690         /* Receive rules. */
5691         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5692         tw32(RCVLPC_CONFIG, 0x0181);
5693
5694         /* Calculate RDMAC_MODE setting early, we need it to determine
5695          * the RCVLPC_STATE_ENABLE mask.
5696          */
5697         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5698                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5699                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5700                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5701                       RDMAC_MODE_LNGREAD_ENAB);
5702         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5703                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5704
5705         /* If statement applies to 5705 and 5750 PCI devices only */
5706         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5707              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5708             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5709                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5710                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5711                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5712                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5713                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5714                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5715                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5716                 }
5717         }
5718
5719         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5720                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5721
5722 #if TG3_TSO_SUPPORT != 0
5723         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5724                 rdmac_mode |= (1 << 27);
5725 #endif
5726
5727         /* Receive/send statistics. */
5728         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5729             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5730                 val = tr32(RCVLPC_STATS_ENABLE);
5731                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5732                 tw32(RCVLPC_STATS_ENABLE, val);
5733         } else {
5734                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5735         }
5736         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5737         tw32(SNDDATAI_STATSENAB, 0xffffff);
5738         tw32(SNDDATAI_STATSCTRL,
5739              (SNDDATAI_SCTRL_ENABLE |
5740               SNDDATAI_SCTRL_FASTUPD));
5741
5742         /* Setup host coalescing engine. */
5743         tw32(HOSTCC_MODE, 0);
5744         for (i = 0; i < 2000; i++) {
5745                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5746                         break;
5747                 udelay(10);
5748         }
5749
5750         __tg3_set_coalesce(tp, &tp->coal);
5751
5752         /* set status block DMA address */
5753         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5754              ((u64) tp->status_mapping >> 32));
5755         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5756              ((u64) tp->status_mapping & 0xffffffff));
5757
5758         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5759                 /* Status/statistics block address.  See tg3_timer,
5760                  * the tg3_periodic_fetch_stats call there, and
5761                  * tg3_get_stats to see how this works for 5705/5750 chips.
5762                  */
5763                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5764                      ((u64) tp->stats_mapping >> 32));
5765                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5766                      ((u64) tp->stats_mapping & 0xffffffff));
5767                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5768                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5769         }
5770
5771         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5772
5773         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5774         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5775         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5776                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5777
5778         /* Clear statistics/status block in chip, and status block in ram. */
5779         for (i = NIC_SRAM_STATS_BLK;
5780              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5781              i += sizeof(u32)) {
5782                 tg3_write_mem(tp, i, 0);
5783                 udelay(40);
5784         }
5785         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5786
5787         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5788                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5789         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5790         udelay(40);
5791
5792         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5793          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5794          * register to preserve the GPIO settings for LOMs. The GPIOs,
5795          * whether used as inputs or outputs, are set by boot code after
5796          * reset.
5797          */
5798         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5799                 u32 gpio_mask;
5800
5801                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5802                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5803
5804                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5805                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5806                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5807
5808                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5809
5810                 /* GPIO1 must be driven high for eeprom write protect */
5811                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5812                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5813         }
5814         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5815         udelay(100);
5816
5817         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5818         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5819         tp->last_tag = 0;
5820
5821         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5822                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5823                 udelay(40);
5824         }
5825
5826         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5827                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5828                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5829                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5830                WDMAC_MODE_LNGREAD_ENAB);
5831
5832         /* If statement applies to 5705 and 5750 PCI devices only */
5833         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5834              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5835             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5836                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5837                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5838                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5839                         /* nothing */
5840                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5841                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5842                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5843                         val |= WDMAC_MODE_RX_ACCEL;
5844                 }
5845         }
5846
5847         tw32_f(WDMAC_MODE, val);
5848         udelay(40);
5849
5850         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5851                 val = tr32(TG3PCI_X_CAPS);
5852                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5853                         val &= ~PCIX_CAPS_BURST_MASK;
5854                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5855                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5856                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5857                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5858                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5859                                 val |= (tp->split_mode_max_reqs <<
5860                                         PCIX_CAPS_SPLIT_SHIFT);
5861                 }
5862                 tw32(TG3PCI_X_CAPS, val);
5863         }
5864
5865         tw32_f(RDMAC_MODE, rdmac_mode);
5866         udelay(40);
5867
5868         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5869         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5870                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5871         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5872         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5873         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5874         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5875         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5876 #if TG3_TSO_SUPPORT != 0
5877         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5878                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5879 #endif
5880         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5881         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5882
5883         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5884                 err = tg3_load_5701_a0_firmware_fix(tp);
5885                 if (err)
5886                         return err;
5887         }
5888
5889 #if TG3_TSO_SUPPORT != 0
5890         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5891                 err = tg3_load_tso_firmware(tp);
5892                 if (err)
5893                         return err;
5894         }
5895 #endif
5896
5897         tp->tx_mode = TX_MODE_ENABLE;
5898         tw32_f(MAC_TX_MODE, tp->tx_mode);
5899         udelay(100);
5900
5901         tp->rx_mode = RX_MODE_ENABLE;
5902         tw32_f(MAC_RX_MODE, tp->rx_mode);
5903         udelay(10);
5904
5905         if (tp->link_config.phy_is_low_power) {
5906                 tp->link_config.phy_is_low_power = 0;
5907                 tp->link_config.speed = tp->link_config.orig_speed;
5908                 tp->link_config.duplex = tp->link_config.orig_duplex;
5909                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5910         }
5911
5912         tp->mi_mode = MAC_MI_MODE_BASE;
5913         tw32_f(MAC_MI_MODE, tp->mi_mode);
5914         udelay(80);
5915
5916         tw32(MAC_LED_CTRL, tp->led_ctrl);
5917
5918         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5919         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5920                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5921                 udelay(10);
5922         }
5923         tw32_f(MAC_RX_MODE, tp->rx_mode);
5924         udelay(10);
5925
5926         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5927                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5928                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5929                         /* Set drive transmission level to 1.2V, but only
5930                          * if the signal pre-emphasis bit is not set.  */
5931                         val = tr32(MAC_SERDES_CFG);
5932                         val &= 0xfffff000;
5933                         val |= 0x880;
5934                         tw32(MAC_SERDES_CFG, val);
5935                 }
5936                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5937                         tw32(MAC_SERDES_CFG, 0x616000);
5938         }
5939
5940         /* Prevent chip from dropping frames when flow control
5941          * is enabled.
5942          */
5943         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5944
5945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5946             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5947                 /* Use hardware link auto-negotiation */
5948                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5949         }
5950
5951         err = tg3_setup_phy(tp, 1);
5952         if (err)
5953                 return err;
5954
5955         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5956                 u32 tmp;
5957
5958                 /* Clear CRC stats. */
5959                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5960                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5961                         tg3_readphy(tp, 0x14, &tmp);
5962                 }
5963         }
5964
5965         __tg3_set_rx_mode(tp->dev);
5966
5967         /* Initialize receive rules. */
5968         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5969         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5970         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5971         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5972
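        /* Clear the unused receive rule slots: 16 slots on most chips,
         * 8 on 5705_PLUS chips other than the 5780, and the top 4 are
         * left alone when ASF firmware is enabled.  The switch below
         * deliberately falls through so every slot above the limit is
         * zeroed.
         */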
5973         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5974             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
5975                 limit = 8;
5976         else
5977                 limit = 16;
5978         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5979                 limit -= 4;
5980         switch (limit) {
5981         case 16:
5982                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5983         case 15:
5984                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5985         case 14:
5986                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5987         case 13:
5988                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5989         case 12:
5990                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5991         case 11:
5992                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5993         case 10:
5994                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5995         case 9:
5996                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5997         case 8:
5998                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5999         case 7:
6000                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6001         case 6:
6002                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6003         case 5:
6004                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6005         case 4:
6006                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6007         case 3:
6008                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6009         case 2:
6010         case 1:
6011
6012         default:
6013                 break;
6014         }
6015
6016         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6017
6018         return 0;
6019 }
6020
6021 /* Called at device open time to get the chip ready for
6022  * packet processing.  Invoked with tp->lock held.
6023  */
6024 static int tg3_init_hw(struct tg3 *tp)
6025 {
6026         int err;
6027
6028         /* Force the chip into D0. */
6029         err = tg3_set_power_state(tp, 0);
6030         if (err)
6031                 goto out;
6032
6033         tg3_switch_clocks(tp);
6034
6035         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6036
6037         err = tg3_reset_hw(tp);
6038
6039 out:
6040         return err;
6041 }
6042
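/* Fold a 32-bit hardware counter into a 64-bit { high, low } software
 * accumulator, propagating a carry into the high word when the low
 * word wraps around.
 */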
6043 #define TG3_STAT_ADD32(PSTAT, REG) \
6044 do {    u32 __val = tr32(REG); \
6045         (PSTAT)->low += __val; \
6046         if ((PSTAT)->low < __val) \
6047                 (PSTAT)->high += 1; \
6048 } while (0)
6049
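/* Accumulate the MAC TX/RX statistics counters into the host
 * statistics block.  tg3_timer calls this once a second on 5705_PLUS
 * chips, for which tg3_reset_hw does not program a statistics block
 * DMA address.
 */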
6050 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6051 {
6052         struct tg3_hw_stats *sp = tp->hw_stats;
6053
6054         if (!netif_carrier_ok(tp->dev))
6055                 return;
6056
6057         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6058         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6059         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6060         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6061         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6062         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6063         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6064         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6065         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6066         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6067         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6068         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6069         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6070
6071         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6072         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6073         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6074         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6075         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6076         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6077         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6078         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6079         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6080         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6081         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6082         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6083         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6084         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6085 }
6086
6087 static void tg3_timer(unsigned long __opaque)
6088 {
6089         struct tg3 *tp = (struct tg3 *) __opaque;
6090
6091         spin_lock(&tp->lock);
6092
6093         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6094                 /* All of this garbage is because when using non-tagged
6095                  * IRQ status the mailbox/status_block protocol the chip
6096                  * uses with the cpu is race prone.
6097                  */
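                /* If the status block shows an update, the interrupt may
                 * have been missed, so force one via SETINT; otherwise
                 * kick the coalescing engine so a fresh status block
                 * update (and interrupt) is generated.
                 */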
6098                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6099                         tw32(GRC_LOCAL_CTRL,
6100                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6101                 } else {
6102                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6103                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6104                 }
6105
6106                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6107                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6108                         spin_unlock(&tp->lock);
6109                         schedule_work(&tp->reset_task);
6110                         return;
6111                 }
6112         }
6113
6114         /* This part only runs once per second. */
6115         if (!--tp->timer_counter) {
6116                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6117                         tg3_periodic_fetch_stats(tp);
6118
6119                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6120                         u32 mac_stat;
6121                         int phy_event;
6122
6123                         mac_stat = tr32(MAC_STATUS);
6124
6125                         phy_event = 0;
6126                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6127                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6128                                         phy_event = 1;
6129                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6130                                 phy_event = 1;
6131
6132                         if (phy_event)
6133                                 tg3_setup_phy(tp, 0);
6134                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6135                         u32 mac_stat = tr32(MAC_STATUS);
6136                         int need_setup = 0;
6137
6138                         if (netif_carrier_ok(tp->dev) &&
6139                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6140                                 need_setup = 1;
6141                         }
6142                         if (!netif_carrier_ok(tp->dev) &&
6143                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6144                                          MAC_STATUS_SIGNAL_DET))) {
6145                                 need_setup = 1;
6146                         }
6147                         if (need_setup) {
6148                                 tw32_f(MAC_MODE,
6149                                      (tp->mac_mode &
6150                                       ~MAC_MODE_PORT_MODE_MASK));
6151                                 udelay(40);
6152                                 tw32_f(MAC_MODE, tp->mac_mode);
6153                                 udelay(40);
6154                                 tg3_setup_phy(tp, 0);
6155                         }
6156                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6157                         tg3_serdes_parallel_detect(tp);
6158
6159                 tp->timer_counter = tp->timer_multiplier;
6160         }
6161
6162         /* Heartbeat is only sent once every 120 seconds.  */
6163         if (!--tp->asf_counter) {
6164                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6165                         u32 val;
6166
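                        /* Post the ALIVE command in the firmware mailbox
                         * and raise the RX CPU event bit so the ASF
                         * firmware notices the heartbeat.
                         */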
6167                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6168                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6169                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6170                         val = tr32(GRC_RX_CPU_EVENT);
6171                         val |= (1 << 14);
6172                         tw32(GRC_RX_CPU_EVENT, val);
6173                 }
6174                 tp->asf_counter = tp->asf_multiplier;
6175         }
6176
6177         spin_unlock(&tp->lock);
6178
6179         tp->timer.expires = jiffies + tp->timer_offset;
6180         add_timer(&tp->timer);
6181 }
6182
6183 static int tg3_test_interrupt(struct tg3 *tp)
6184 {
6185         struct net_device *dev = tp->dev;
6186         int err, i;
6187         u32 int_mbox = 0;
6188
6189         if (!netif_running(dev))
6190                 return -ENODEV;
6191
6192         tg3_disable_ints(tp);
6193
6194         free_irq(tp->pdev->irq, dev);
6195
6196         err = request_irq(tp->pdev->irq, tg3_test_isr,
6197                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6198         if (err)
6199                 return err;
6200
6201         tg3_enable_ints(tp);
6202
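        /* Force the coalescing engine to fire right away, then poll the
         * interrupt mailbox; a non-zero value means the test interrupt
         * was delivered and serviced.
         */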
6203         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6204                HOSTCC_MODE_NOW);
6205
6206         for (i = 0; i < 5; i++) {
6207                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
6208                 if (int_mbox != 0)
6209                         break;
6210                 msleep(10);
6211         }
6212
6213         tg3_disable_ints(tp);
6214
6215         free_irq(tp->pdev->irq, dev);
6216
6217         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6218                 err = request_irq(tp->pdev->irq, tg3_msi,
6219                                   SA_SAMPLE_RANDOM, dev->name, dev);
6220         else {
6221                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6222                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6223                         fn = tg3_interrupt_tagged;
6224                 err = request_irq(tp->pdev->irq, fn,
6225                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6226         }
6227
6228         if (err)
6229                 return err;
6230
6231         if (int_mbox != 0)
6232                 return 0;
6233
6234         return -EIO;
6235 }
6236
6237 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6238  * INTx mode is successfully restored.
6239  */
6240 static int tg3_test_msi(struct tg3 *tp)
6241 {
6242         struct net_device *dev = tp->dev;
6243         int err;
6244         u16 pci_cmd;
6245
6246         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6247                 return 0;
6248
6249         /* Turn off SERR reporting in case MSI terminates with Master
6250          * Abort.
6251          */
6252         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6253         pci_write_config_word(tp->pdev, PCI_COMMAND,
6254                               pci_cmd & ~PCI_COMMAND_SERR);
6255
6256         err = tg3_test_interrupt(tp);
6257
6258         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6259
6260         if (!err)
6261                 return 0;
6262
6263         /* other failures */
6264         if (err != -EIO)
6265                 return err;
6266
6267         /* MSI test failed, go back to INTx mode */
6268         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6269                "switching to INTx mode. Please report this failure to "
6270                "the PCI maintainer and include system chipset information.\n",
6271                        tp->dev->name);
6272
6273         free_irq(tp->pdev->irq, dev);
6274         pci_disable_msi(tp->pdev);
6275
6276         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6277
6278         {
6279                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6280                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6281                         fn = tg3_interrupt_tagged;
6282
6283                 err = request_irq(tp->pdev->irq, fn,
6284                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6285         }
6286         if (err)
6287                 return err;
6288
6289         /* Need to reset the chip because the MSI cycle may have terminated
6290          * with Master Abort.
6291          */
6292         tg3_full_lock(tp, 1);
6293
6294         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6295         err = tg3_init_hw(tp);
6296
6297         tg3_full_unlock(tp);
6298
6299         if (err)
6300                 free_irq(tp->pdev->irq, dev);
6301
6302         return err;
6303 }
6304
6305 static int tg3_open(struct net_device *dev)
6306 {
6307         struct tg3 *tp = netdev_priv(dev);
6308         int err;
6309
6310         tg3_full_lock(tp, 0);
6311
6312         tg3_disable_ints(tp);
6313         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6314
6315         tg3_full_unlock(tp);
6316
6317         /* The placement of this call is tied
6318          * to the setup and use of Host TX descriptors.
6319          */
6320         err = tg3_alloc_consistent(tp);
6321         if (err)
6322                 return err;
6323
6324         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6325             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6326             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6327                 /* All MSI-supporting chips should support tagged
6328                  * status.  Assert that this is the case.
6329                  */
6330                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6331                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6332                                "Not using MSI.\n", tp->dev->name);
6333                 } else if (pci_enable_msi(tp->pdev) == 0) {
6334                         u32 msi_mode;
6335
6336                         msi_mode = tr32(MSGINT_MODE);
6337                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6338                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6339                 }
6340         }
6341         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6342                 err = request_irq(tp->pdev->irq, tg3_msi,
6343                                   SA_SAMPLE_RANDOM, dev->name, dev);
6344         else {
6345                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6346                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6347                         fn = tg3_interrupt_tagged;
6348
6349                 err = request_irq(tp->pdev->irq, fn,
6350                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6351         }
6352
6353         if (err) {
6354                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6355                         pci_disable_msi(tp->pdev);
6356                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6357                 }
6358                 tg3_free_consistent(tp);
6359                 return err;
6360         }
6361
6362         tg3_full_lock(tp, 0);
6363
6364         err = tg3_init_hw(tp);
6365         if (err) {
6366                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6367                 tg3_free_rings(tp);
6368         } else {
6369                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6370                         tp->timer_offset = HZ;
6371                 else
6372                         tp->timer_offset = HZ / 10;
6373
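                /* With tagged status the timer ticks once per second
                 * (timer_offset == HZ), otherwise ten times per second.
                 * timer_counter/timer_multiplier correspond to one second's
                 * worth of ticks and asf_counter/asf_multiplier to 120
                 * seconds' worth, independent of the tick rate.
                 */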
6374                 BUG_ON(tp->timer_offset > HZ);
6375                 tp->timer_counter = tp->timer_multiplier =
6376                         (HZ / tp->timer_offset);
6377                 tp->asf_counter = tp->asf_multiplier =
6378                         ((HZ / tp->timer_offset) * 120);
6379
6380                 init_timer(&tp->timer);
6381                 tp->timer.expires = jiffies + tp->timer_offset;
6382                 tp->timer.data = (unsigned long) tp;
6383                 tp->timer.function = tg3_timer;
6384         }
6385
6386         tg3_full_unlock(tp);
6387
6388         if (err) {
6389                 free_irq(tp->pdev->irq, dev);
6390                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6391                         pci_disable_msi(tp->pdev);
6392                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6393                 }
6394                 tg3_free_consistent(tp);
6395                 return err;
6396         }
6397
6398         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6399                 err = tg3_test_msi(tp);
6400
6401                 if (err) {
6402                         tg3_full_lock(tp, 0);
6403
6404                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6405                                 pci_disable_msi(tp->pdev);
6406                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6407                         }
6408                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6409                         tg3_free_rings(tp);
6410                         tg3_free_consistent(tp);
6411
6412                         tg3_full_unlock(tp);
6413
6414                         return err;
6415                 }
6416         }
6417
6418         tg3_full_lock(tp, 0);
6419
6420         add_timer(&tp->timer);
6421         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6422         tg3_enable_ints(tp);
6423
6424         tg3_full_unlock(tp);
6425
6426         netif_start_queue(dev);
6427
6428         return 0;
6429 }
6430
6431 #if 0
6432 /*static*/ void tg3_dump_state(struct tg3 *tp)
6433 {
6434         u32 val32, val32_2, val32_3, val32_4, val32_5;
6435         u16 val16;
6436         int i;
6437
6438         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6439         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6440         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6441                val16, val32);
6442
6443         /* MAC block */
6444         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6445                tr32(MAC_MODE), tr32(MAC_STATUS));
6446         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6447                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6448         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6449                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6450         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6451                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6452
6453         /* Send data initiator control block */
6454         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6455                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6456         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6457                tr32(SNDDATAI_STATSCTRL));
6458
6459         /* Send data completion control block */
6460         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6461
6462         /* Send BD ring selector block */
6463         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6464                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6465
6466         /* Send BD initiator control block */
6467         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6468                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6469
6470         /* Send BD completion control block */
6471         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6472
6473         /* Receive list placement control block */
6474         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6475                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6476         printk("       RCVLPC_STATSCTRL[%08x]\n",
6477                tr32(RCVLPC_STATSCTRL));
6478
6479         /* Receive data and receive BD initiator control block */
6480         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6481                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6482
6483         /* Receive data completion control block */
6484         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6485                tr32(RCVDCC_MODE));
6486
6487         /* Receive BD initiator control block */
6488         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6489                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6490
6491         /* Receive BD completion control block */
6492         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6493                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6494
6495         /* Receive list selector control block */
6496         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6497                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6498
6499         /* Mbuf cluster free block */
6500         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6501                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6502
6503         /* Host coalescing control block */
6504         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6505                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6506         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6507                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6508                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6509         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6510                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6511                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6512         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6513                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6514         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6515                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6516
6517         /* Memory arbiter control block */
6518         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6519                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6520
6521         /* Buffer manager control block */
6522         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6523                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6524         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6525                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6526         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6527                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6528                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6529                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6530
6531         /* Read DMA control block */
6532         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6533                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6534
6535         /* Write DMA control block */
6536         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6537                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6538
6539         /* DMA completion block */
6540         printk("DEBUG: DMAC_MODE[%08x]\n",
6541                tr32(DMAC_MODE));
6542
6543         /* GRC block */
6544         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6545                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6546         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6547                tr32(GRC_LOCAL_CTRL));
6548
6549         /* TG3_BDINFOs */
6550         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6551                tr32(RCVDBDI_JUMBO_BD + 0x0),
6552                tr32(RCVDBDI_JUMBO_BD + 0x4),
6553                tr32(RCVDBDI_JUMBO_BD + 0x8),
6554                tr32(RCVDBDI_JUMBO_BD + 0xc));
6555         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6556                tr32(RCVDBDI_STD_BD + 0x0),
6557                tr32(RCVDBDI_STD_BD + 0x4),
6558                tr32(RCVDBDI_STD_BD + 0x8),
6559                tr32(RCVDBDI_STD_BD + 0xc));
6560         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6561                tr32(RCVDBDI_MINI_BD + 0x0),
6562                tr32(RCVDBDI_MINI_BD + 0x4),
6563                tr32(RCVDBDI_MINI_BD + 0x8),
6564                tr32(RCVDBDI_MINI_BD + 0xc));
6565
6566         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6567         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6568         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6569         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6570         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6571                val32, val32_2, val32_3, val32_4);
6572
6573         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6574         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6575         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6576         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6577         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6578                val32, val32_2, val32_3, val32_4);
6579
6580         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6581         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6582         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6583         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6584         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6585         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6586                val32, val32_2, val32_3, val32_4, val32_5);
6587
6588         /* SW status block */
6589         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6590                tp->hw_status->status,
6591                tp->hw_status->status_tag,
6592                tp->hw_status->rx_jumbo_consumer,
6593                tp->hw_status->rx_consumer,
6594                tp->hw_status->rx_mini_consumer,
6595                tp->hw_status->idx[0].rx_producer,
6596                tp->hw_status->idx[0].tx_consumer);
6597
6598         /* SW statistics block */
6599         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6600                ((u32 *)tp->hw_stats)[0],
6601                ((u32 *)tp->hw_stats)[1],
6602                ((u32 *)tp->hw_stats)[2],
6603                ((u32 *)tp->hw_stats)[3]);
6604
6605         /* Mailboxes */
6606         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6607                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6608                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6609                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6610                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6611
6612         /* NIC side send descriptors. */
6613         for (i = 0; i < 6; i++) {
6614                 unsigned long txd;
6615
6616                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6617                         + (i * sizeof(struct tg3_tx_buffer_desc));
6618                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6619                        i,
6620                        readl(txd + 0x0), readl(txd + 0x4),
6621                        readl(txd + 0x8), readl(txd + 0xc));
6622         }
6623
6624         /* NIC side RX descriptors. */
6625         for (i = 0; i < 6; i++) {
6626                 unsigned long rxd;
6627
6628                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6629                         + (i * sizeof(struct tg3_rx_buffer_desc));
6630                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6631                        i,
6632                        readl(rxd + 0x0), readl(rxd + 0x4),
6633                        readl(rxd + 0x8), readl(rxd + 0xc));
6634                 rxd += (4 * sizeof(u32));
6635                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6636                        i,
6637                        readl(rxd + 0x0), readl(rxd + 0x4),
6638                        readl(rxd + 0x8), readl(rxd + 0xc));
6639         }
6640
6641         for (i = 0; i < 6; i++) {
6642                 unsigned long rxd;
6643
6644                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6645                         + (i * sizeof(struct tg3_rx_buffer_desc));
6646                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6647                        i,
6648                        readl(rxd + 0x0), readl(rxd + 0x4),
6649                        readl(rxd + 0x8), readl(rxd + 0xc));
6650                 rxd += (4 * sizeof(u32));
6651                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6652                        i,
6653                        readl(rxd + 0x0), readl(rxd + 0x4),
6654                        readl(rxd + 0x8), readl(rxd + 0xc));
6655         }
6656 }
6657 #endif
6658
6659 static struct net_device_stats *tg3_get_stats(struct net_device *);
6660 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6661
6662 static int tg3_close(struct net_device *dev)
6663 {
6664         struct tg3 *tp = netdev_priv(dev);
6665
6666         netif_stop_queue(dev);
6667
6668         del_timer_sync(&tp->timer);
6669
6670         tg3_full_lock(tp, 1);
6671 #if 0
6672         tg3_dump_state(tp);
6673 #endif
6674
6675         tg3_disable_ints(tp);
6676
6677         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6678         tg3_free_rings(tp);
6679         tp->tg3_flags &=
6680                 ~(TG3_FLAG_INIT_COMPLETE |
6681                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6682         netif_carrier_off(tp->dev);
6683
6684         tg3_full_unlock(tp);
6685
6686         free_irq(tp->pdev->irq, dev);
6687         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6688                 pci_disable_msi(tp->pdev);
6689                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6690         }
6691
6692         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6693                sizeof(tp->net_stats_prev));
6694         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6695                sizeof(tp->estats_prev));
6696
6697         tg3_free_consistent(tp);
6698
6699         return 0;
6700 }
6701
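/* Hardware counters are kept as {high,low} 32-bit pairs.  On 32-bit hosts
 * an unsigned long only holds the low word, so the high word is dropped
 * there; 64-bit hosts get the full 64-bit value.
 */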
6702 static inline unsigned long get_stat64(tg3_stat64_t *val)
6703 {
6704         unsigned long ret;
6705
6706 #if (BITS_PER_LONG == 32)
6707         ret = val->low;
6708 #else
6709         ret = ((u64)val->high << 32) | ((u64)val->low);
6710 #endif
6711         return ret;
6712 }
6713
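/* On 5700/5701 with a copper PHY the CRC error count is fetched from the
 * PHY (register 0x14, exposed by setting bit 15 of register 0x1e) and
 * accumulated in tp->phy_crc_errors; every other chip reports the MAC's
 * rx_fcs_errors counter directly.
 */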
6714 static unsigned long calc_crc_errors(struct tg3 *tp)
6715 {
6716         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6717
6718         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6719             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6720              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6721                 u32 val;
6722
6723                 spin_lock_bh(&tp->lock);
6724                 if (!tg3_readphy(tp, 0x1e, &val)) {
6725                         tg3_writephy(tp, 0x1e, val | 0x8000);
6726                         tg3_readphy(tp, 0x14, &val);
6727                 } else
6728                         val = 0;
6729                 spin_unlock_bh(&tp->lock);
6730
6731                 tp->phy_crc_errors += val;
6732
6733                 return tp->phy_crc_errors;
6734         }
6735
6736         return get_stat64(&hw_stats->rx_fcs_errors);
6737 }
6738
6739 #define ESTAT_ADD(member) \
6740         estats->member =        old_estats->member + \
6741                                 get_stat64(&hw_stats->member)
6742
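/* tg3_close() saves the accumulated totals in tp->estats_prev before the
 * statistics block is freed; ESTAT_ADD() reports that saved total plus the
 * current hardware counter, so the numbers keep growing across down/up
 * cycles.
 */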
6743 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6744 {
6745         struct tg3_ethtool_stats *estats = &tp->estats;
6746         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6747         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6748
6749         if (!hw_stats)
6750                 return old_estats;
6751
6752         ESTAT_ADD(rx_octets);
6753         ESTAT_ADD(rx_fragments);
6754         ESTAT_ADD(rx_ucast_packets);
6755         ESTAT_ADD(rx_mcast_packets);
6756         ESTAT_ADD(rx_bcast_packets);
6757         ESTAT_ADD(rx_fcs_errors);
6758         ESTAT_ADD(rx_align_errors);
6759         ESTAT_ADD(rx_xon_pause_rcvd);
6760         ESTAT_ADD(rx_xoff_pause_rcvd);
6761         ESTAT_ADD(rx_mac_ctrl_rcvd);
6762         ESTAT_ADD(rx_xoff_entered);
6763         ESTAT_ADD(rx_frame_too_long_errors);
6764         ESTAT_ADD(rx_jabbers);
6765         ESTAT_ADD(rx_undersize_packets);
6766         ESTAT_ADD(rx_in_length_errors);
6767         ESTAT_ADD(rx_out_length_errors);
6768         ESTAT_ADD(rx_64_or_less_octet_packets);
6769         ESTAT_ADD(rx_65_to_127_octet_packets);
6770         ESTAT_ADD(rx_128_to_255_octet_packets);
6771         ESTAT_ADD(rx_256_to_511_octet_packets);
6772         ESTAT_ADD(rx_512_to_1023_octet_packets);
6773         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6774         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6775         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6776         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6777         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6778
6779         ESTAT_ADD(tx_octets);
6780         ESTAT_ADD(tx_collisions);
6781         ESTAT_ADD(tx_xon_sent);
6782         ESTAT_ADD(tx_xoff_sent);
6783         ESTAT_ADD(tx_flow_control);
6784         ESTAT_ADD(tx_mac_errors);
6785         ESTAT_ADD(tx_single_collisions);
6786         ESTAT_ADD(tx_mult_collisions);
6787         ESTAT_ADD(tx_deferred);
6788         ESTAT_ADD(tx_excessive_collisions);
6789         ESTAT_ADD(tx_late_collisions);
6790         ESTAT_ADD(tx_collide_2times);
6791         ESTAT_ADD(tx_collide_3times);
6792         ESTAT_ADD(tx_collide_4times);
6793         ESTAT_ADD(tx_collide_5times);
6794         ESTAT_ADD(tx_collide_6times);
6795         ESTAT_ADD(tx_collide_7times);
6796         ESTAT_ADD(tx_collide_8times);
6797         ESTAT_ADD(tx_collide_9times);
6798         ESTAT_ADD(tx_collide_10times);
6799         ESTAT_ADD(tx_collide_11times);
6800         ESTAT_ADD(tx_collide_12times);
6801         ESTAT_ADD(tx_collide_13times);
6802         ESTAT_ADD(tx_collide_14times);
6803         ESTAT_ADD(tx_collide_15times);
6804         ESTAT_ADD(tx_ucast_packets);
6805         ESTAT_ADD(tx_mcast_packets);
6806         ESTAT_ADD(tx_bcast_packets);
6807         ESTAT_ADD(tx_carrier_sense_errors);
6808         ESTAT_ADD(tx_discards);
6809         ESTAT_ADD(tx_errors);
6810
6811         ESTAT_ADD(dma_writeq_full);
6812         ESTAT_ADD(dma_write_prioq_full);
6813         ESTAT_ADD(rxbds_empty);
6814         ESTAT_ADD(rx_discards);
6815         ESTAT_ADD(rx_errors);
6816         ESTAT_ADD(rx_threshold_hit);
6817
6818         ESTAT_ADD(dma_readq_full);
6819         ESTAT_ADD(dma_read_prioq_full);
6820         ESTAT_ADD(tx_comp_queue_full);
6821
6822         ESTAT_ADD(ring_set_send_prod_index);
6823         ESTAT_ADD(ring_status_update);
6824         ESTAT_ADD(nic_irqs);
6825         ESTAT_ADD(nic_avoided_irqs);
6826         ESTAT_ADD(nic_tx_threshold_hit);
6827
6828         return estats;
6829 }
6830
6831 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6832 {
6833         struct tg3 *tp = netdev_priv(dev);
6834         struct net_device_stats *stats = &tp->net_stats;
6835         struct net_device_stats *old_stats = &tp->net_stats_prev;
6836         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6837
6838         if (!hw_stats)
6839                 return old_stats;
6840
6841         stats->rx_packets = old_stats->rx_packets +
6842                 get_stat64(&hw_stats->rx_ucast_packets) +
6843                 get_stat64(&hw_stats->rx_mcast_packets) +
6844                 get_stat64(&hw_stats->rx_bcast_packets);
6845                 
6846         stats->tx_packets = old_stats->tx_packets +
6847                 get_stat64(&hw_stats->tx_ucast_packets) +
6848                 get_stat64(&hw_stats->tx_mcast_packets) +
6849                 get_stat64(&hw_stats->tx_bcast_packets);
6850
6851         stats->rx_bytes = old_stats->rx_bytes +
6852                 get_stat64(&hw_stats->rx_octets);
6853         stats->tx_bytes = old_stats->tx_bytes +
6854                 get_stat64(&hw_stats->tx_octets);
6855
6856         stats->rx_errors = old_stats->rx_errors +
6857                 get_stat64(&hw_stats->rx_errors) +
6858                 get_stat64(&hw_stats->rx_discards);
6859         stats->tx_errors = old_stats->tx_errors +
6860                 get_stat64(&hw_stats->tx_errors) +
6861                 get_stat64(&hw_stats->tx_mac_errors) +
6862                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6863                 get_stat64(&hw_stats->tx_discards);
6864
6865         stats->multicast = old_stats->multicast +
6866                 get_stat64(&hw_stats->rx_mcast_packets);
6867         stats->collisions = old_stats->collisions +
6868                 get_stat64(&hw_stats->tx_collisions);
6869
6870         stats->rx_length_errors = old_stats->rx_length_errors +
6871                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6872                 get_stat64(&hw_stats->rx_undersize_packets);
6873
6874         stats->rx_over_errors = old_stats->rx_over_errors +
6875                 get_stat64(&hw_stats->rxbds_empty);
6876         stats->rx_frame_errors = old_stats->rx_frame_errors +
6877                 get_stat64(&hw_stats->rx_align_errors);
6878         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6879                 get_stat64(&hw_stats->tx_discards);
6880         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6881                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6882
6883         stats->rx_crc_errors = old_stats->rx_crc_errors +
6884                 calc_crc_errors(tp);
6885
6886         return stats;
6887 }
6888
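/* Bitwise (table-free) implementation of the standard bit-reflected CRC-32
 * (polynomial 0xedb88320, initial value ~0, final complement), the same CRC
 * used for the Ethernet FCS.  It is used below to hash multicast addresses
 * into the MAC hash registers.
 */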
6889 static inline u32 calc_crc(unsigned char *buf, int len)
6890 {
6891         u32 reg;
6892         u32 tmp;
6893         int j, k;
6894
6895         reg = 0xffffffff;
6896
6897         for (j = 0; j < len; j++) {
6898                 reg ^= buf[j];
6899
6900                 for (k = 0; k < 8; k++) {
6901                         tmp = reg & 0x01;
6902
6903                         reg >>= 1;
6904
6905                         if (tmp) {
6906                                 reg ^= 0xedb88320;
6907                         }
6908                 }
6909         }
6910
6911         return ~reg;
6912 }
6913
6914 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6915 {
6916         /* accept or reject all multicast frames */
6917         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6918         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6919         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6920         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6921 }
6922
6923 static void __tg3_set_rx_mode(struct net_device *dev)
6924 {
6925         struct tg3 *tp = netdev_priv(dev);
6926         u32 rx_mode;
6927
6928         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6929                                   RX_MODE_KEEP_VLAN_TAG);
6930
6931         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6932          * flag clear.
6933          */
6934 #if TG3_VLAN_TAG_USED
6935         if (!tp->vlgrp &&
6936             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6937                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6938 #else
6939         /* By definition, VLAN is always disabled in this
6940          * case.
6941          */
6942         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6943                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6944 #endif
6945
6946         if (dev->flags & IFF_PROMISC) {
6947                 /* Promiscuous mode. */
6948                 rx_mode |= RX_MODE_PROMISC;
6949         } else if (dev->flags & IFF_ALLMULTI) {
6950                 /* Accept all multicast. */
6951                 tg3_set_multi (tp, 1);
6952         } else if (dev->mc_count < 1) {
6953                 /* Reject all multicast. */
6954                 tg3_set_multi (tp, 0);
6955         } else {
6956                 /* Accept one or more multicast(s). */
6957                 struct dev_mc_list *mclist;
6958                 unsigned int i;
6959                 u32 mc_filter[4] = { 0, };
6960                 u32 regidx;
6961                 u32 bit;
6962                 u32 crc;
6963
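                /* Hash each address into a 128-bit filter: the low 7 bits
                 * of the inverted CRC select the filter bit, with bits 6:5
                 * picking one of the four MAC_HASH_REG_x registers and
                 * bits 4:0 the bit within that register.
                 */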
6964                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6965                      i++, mclist = mclist->next) {
6966
6967                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6968                         bit = ~crc & 0x7f;
6969                         regidx = (bit & 0x60) >> 5;
6970                         bit &= 0x1f;
6971                         mc_filter[regidx] |= (1 << bit);
6972                 }
6973
6974                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6975                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6976                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6977                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6978         }
6979
6980         if (rx_mode != tp->rx_mode) {
6981                 tp->rx_mode = rx_mode;
6982                 tw32_f(MAC_RX_MODE, rx_mode);
6983                 udelay(10);
6984         }
6985 }
6986
6987 static void tg3_set_rx_mode(struct net_device *dev)
6988 {
6989         struct tg3 *tp = netdev_priv(dev);
6990
6991         tg3_full_lock(tp, 0);
6992         __tg3_set_rx_mode(dev);
6993         tg3_full_unlock(tp);
6994 }
6995
6996 #define TG3_REGDUMP_LEN         (32 * 1024)
6997
6998 static int tg3_get_regs_len(struct net_device *dev)
6999 {
7000         return TG3_REGDUMP_LEN;
7001 }
7002
7003 static void tg3_get_regs(struct net_device *dev,
7004                 struct ethtool_regs *regs, void *_p)
7005 {
7006         u32 *p = _p;
7007         struct tg3 *tp = netdev_priv(dev);
7008         u8 *orig_p = _p;
7009         int i;
7010
7011         regs->version = 0;
7012
7013         memset(p, 0, TG3_REGDUMP_LEN);
7014
7015         tg3_full_lock(tp, 0);
7016
7017 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7018 #define GET_REG32_LOOP(base,len)                \
7019 do {    p = (u32 *)(orig_p + (base));           \
7020         for (i = 0; i < len; i += 4)            \
7021                 __GET_REG32((base) + i);        \
7022 } while (0)
7023 #define GET_REG32_1(reg)                        \
7024 do {    p = (u32 *)(orig_p + (reg));            \
7025         __GET_REG32((reg));                     \
7026 } while (0)
7027
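        /* Each register block is copied into the dump at its own offset
         * (p is repositioned to orig_p + base), so the 32K buffer mirrors
         * the chip's register map; gaps that are never read stay zero from
         * the memset() above.
         */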
7028         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7029         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7030         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7031         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7032         GET_REG32_1(SNDDATAC_MODE);
7033         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7034         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7035         GET_REG32_1(SNDBDC_MODE);
7036         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7037         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7038         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7039         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7040         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7041         GET_REG32_1(RCVDCC_MODE);
7042         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7043         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7044         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7045         GET_REG32_1(MBFREE_MODE);
7046         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7047         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7048         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7049         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7050         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7051         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7052         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7053         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7054         GET_REG32_LOOP(FTQ_RESET, 0x120);
7055         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7056         GET_REG32_1(DMAC_MODE);
7057         GET_REG32_LOOP(GRC_MODE, 0x4c);
7058         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7059                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7060
7061 #undef __GET_REG32
7062 #undef GET_REG32_LOOP
7063 #undef GET_REG32_1
7064
7065         tg3_full_unlock(tp);
7066 }
7067
7068 static int tg3_get_eeprom_len(struct net_device *dev)
7069 {
7070         struct tg3 *tp = netdev_priv(dev);
7071
7072         return tp->nvram_size;
7073 }
7074
7075 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7076
7077 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7078 {
7079         struct tg3 *tp = netdev_priv(dev);
7080         int ret;
7081         u8  *pd;
7082         u32 i, offset, len, val, b_offset, b_count;
7083
7084         offset = eeprom->offset;
7085         len = eeprom->len;
7086         eeprom->len = 0;
7087
7088         eeprom->magic = TG3_EEPROM_MAGIC;
7089
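        /* NVRAM is read one 32-bit word at a time, so the request is split
         * into an unaligned head, whole aligned words, and an unaligned
         * tail.  E.g. offset=2 len=5: copy bytes 2-3 of the word at 0, skip
         * the aligned loop, then copy bytes 0-2 of the word at 4.
         */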
7090         if (offset & 3) {
7091                 /* adjustments to start on required 4 byte boundary */
7092                 b_offset = offset & 3;
7093                 b_count = 4 - b_offset;
7094                 if (b_count > len) {
7095                         /* i.e. offset=1 len=2 */
7096                         b_count = len;
7097                 }
7098                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7099                 if (ret)
7100                         return ret;
7101                 val = cpu_to_le32(val);
7102                 memcpy(data, ((char*)&val) + b_offset, b_count);
7103                 len -= b_count;
7104                 offset += b_count;
7105                 eeprom->len += b_count;
7106         }
7107
7108         /* read bytes up to the last 4 byte boundary */
7109         pd = &data[eeprom->len];
7110         for (i = 0; i < (len - (len & 3)); i += 4) {
7111                 ret = tg3_nvram_read(tp, offset + i, &val);
7112                 if (ret) {
7113                         eeprom->len += i;
7114                         return ret;
7115                 }
7116                 val = cpu_to_le32(val);
7117                 memcpy(pd + i, &val, 4);
7118         }
7119         eeprom->len += i;
7120
7121         if (len & 3) {
7122                 /* read last bytes not ending on 4 byte boundary */
7123                 pd = &data[eeprom->len];
7124                 b_count = len & 3;
7125                 b_offset = offset + len - b_count;
7126                 ret = tg3_nvram_read(tp, b_offset, &val);
7127                 if (ret)
7128                         return ret;
7129                 val = cpu_to_le32(val);
7130                 memcpy(pd, ((char*)&val), b_count);
7131                 eeprom->len += b_count;
7132         }
7133         return 0;
7134 }
7135
7136 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7137
7138 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7139 {
7140         struct tg3 *tp = netdev_priv(dev);
7141         int ret;
7142         u32 offset, len, b_offset, odd_len, start, end;
7143         u8 *buf;
7144
7145         if (eeprom->magic != TG3_EEPROM_MAGIC)
7146                 return -EINVAL;
7147
7148         offset = eeprom->offset;
7149         len = eeprom->len;
7150
7151         if ((b_offset = (offset & 3))) {
7152                 /* adjustments to start on required 4 byte boundary */
7153                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7154                 if (ret)
7155                         return ret;
7156                 start = cpu_to_le32(start);
7157                 len += b_offset;
7158                 offset &= ~3;
7159                 if (len < 4)
7160                         len = 4;
7161         }
7162
7163         odd_len = 0;
7164         if (len & 3) {
7165                 /* adjustments to end on required 4 byte boundary */
7166                 odd_len = 1;
7167                 len = (len + 3) & ~3;
7168                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7169                 if (ret)
7170                         return ret;
7171                 end = cpu_to_le32(end);
7172         }
7173
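        /* When either end of the write is not word aligned, stage the data
         * in a bounce buffer with the NVRAM words read back above placed at
         * the head and/or tail, so the neighbouring bytes are rewritten
         * unchanged.
         */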
7174         buf = data;
7175         if (b_offset || odd_len) {
7176                 buf = kmalloc(len, GFP_KERNEL);
7177                 if (!buf)
7178                         return -ENOMEM;
7179                 if (b_offset)
7180                         memcpy(buf, &start, 4);
7181                 if (odd_len)
7182                         memcpy(buf+len-4, &end, 4);
7183                 memcpy(buf + b_offset, data, eeprom->len);
7184         }
7185
7186         ret = tg3_nvram_write_block(tp, offset, len, buf);
7187
7188         if (buf != data)
7189                 kfree(buf);
7190
7191         return ret;
7192 }
7193
7194 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7195 {
7196         struct tg3 *tp = netdev_priv(dev);
7197   
7198         cmd->supported = (SUPPORTED_Autoneg);
7199
7200         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7201                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7202                                    SUPPORTED_1000baseT_Full);
7203
7204         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7205                 cmd->supported |= (SUPPORTED_100baseT_Half |
7206                                   SUPPORTED_100baseT_Full |
7207                                   SUPPORTED_10baseT_Half |
7208                                   SUPPORTED_10baseT_Full |
7209                                   SUPPORTED_MII);
7210         else
7211                 cmd->supported |= SUPPORTED_FIBRE;
7212   
7213         cmd->advertising = tp->link_config.advertising;
7214         if (netif_running(dev)) {
7215                 cmd->speed = tp->link_config.active_speed;
7216                 cmd->duplex = tp->link_config.active_duplex;
7217         }
7218         cmd->port = 0;
7219         cmd->phy_address = PHY_ADDR;
7220         cmd->transceiver = 0;
7221         cmd->autoneg = tp->link_config.autoneg;
7222         cmd->maxtxpkt = 0;
7223         cmd->maxrxpkt = 0;
7224         return 0;
7225 }
7226   
7227 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7228 {
7229         struct tg3 *tp = netdev_priv(dev);
7230   
7231         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7232                 /* These are the only valid advertisement bits allowed.  */
7233                 if (cmd->autoneg == AUTONEG_ENABLE &&
7234                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7235                                           ADVERTISED_1000baseT_Full |
7236                                           ADVERTISED_Autoneg |
7237                                           ADVERTISED_FIBRE)))
7238                         return -EINVAL;
7239         }
7240
7241         tg3_full_lock(tp, 0);
7242
7243         tp->link_config.autoneg = cmd->autoneg;
7244         if (cmd->autoneg == AUTONEG_ENABLE) {
7245                 tp->link_config.advertising = cmd->advertising;
7246                 tp->link_config.speed = SPEED_INVALID;
7247                 tp->link_config.duplex = DUPLEX_INVALID;
7248         } else {
7249                 tp->link_config.advertising = 0;
7250                 tp->link_config.speed = cmd->speed;
7251                 tp->link_config.duplex = cmd->duplex;
7252         }
7253   
7254         if (netif_running(dev))
7255                 tg3_setup_phy(tp, 1);
7256
7257         tg3_full_unlock(tp);
7258   
7259         return 0;
7260 }
7261   
7262 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7263 {
7264         struct tg3 *tp = netdev_priv(dev);
7265   
7266         strcpy(info->driver, DRV_MODULE_NAME);
7267         strcpy(info->version, DRV_MODULE_VERSION);
7268         strcpy(info->bus_info, pci_name(tp->pdev));
7269 }
7270   
7271 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7272 {
7273         struct tg3 *tp = netdev_priv(dev);
7274   
7275         wol->supported = WAKE_MAGIC;
7276         wol->wolopts = 0;
7277         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7278                 wol->wolopts = WAKE_MAGIC;
7279         memset(&wol->sopass, 0, sizeof(wol->sopass));
7280 }
7281   
7282 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7283 {
7284         struct tg3 *tp = netdev_priv(dev);
7285   
7286         if (wol->wolopts & ~WAKE_MAGIC)
7287                 return -EINVAL;
7288         if ((wol->wolopts & WAKE_MAGIC) &&
7289             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7290             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7291                 return -EINVAL;
7292   
7293         spin_lock_bh(&tp->lock);
7294         if (wol->wolopts & WAKE_MAGIC)
7295                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7296         else
7297                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7298         spin_unlock_bh(&tp->lock);
7299   
7300         return 0;
7301 }
7302   
7303 static u32 tg3_get_msglevel(struct net_device *dev)
7304 {
7305         struct tg3 *tp = netdev_priv(dev);
7306         return tp->msg_enable;
7307 }
7308   
7309 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7310 {
7311         struct tg3 *tp = netdev_priv(dev);
7312         tp->msg_enable = value;
7313 }
7314   
7315 #if TG3_TSO_SUPPORT != 0
7316 static int tg3_set_tso(struct net_device *dev, u32 value)
7317 {
7318         struct tg3 *tp = netdev_priv(dev);
7319
7320         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7321                 if (value)
7322                         return -EINVAL;
7323                 return 0;
7324         }
7325         return ethtool_op_set_tso(dev, value);
7326 }
7327 #endif
7328   
7329 static int tg3_nway_reset(struct net_device *dev)
7330 {
7331         struct tg3 *tp = netdev_priv(dev);
7332         u32 bmcr;
7333         int r;
7334   
7335         if (!netif_running(dev))
7336                 return -EAGAIN;
7337
7338         spin_lock_bh(&tp->lock);
7339         r = -EINVAL;
7340         tg3_readphy(tp, MII_BMCR, &bmcr);
7341         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7342             (bmcr & BMCR_ANENABLE)) {
7343                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7344                 r = 0;
7345         }
7346         spin_unlock_bh(&tp->lock);
7347   
7348         return r;
7349 }
7350   
7351 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7352 {
7353         struct tg3 *tp = netdev_priv(dev);
7354   
7355         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7356         ering->rx_mini_max_pending = 0;
7357         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7358
7359         ering->rx_pending = tp->rx_pending;
7360         ering->rx_mini_pending = 0;
7361         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7362         ering->tx_pending = tp->tx_pending;
7363 }
7364   
7365 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7366 {
7367         struct tg3 *tp = netdev_priv(dev);
7368         int irq_sync = 0;
7369   
7370         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7371             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7372             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7373                 return -EINVAL;
7374   
7375         if (netif_running(dev)) {
7376                 tg3_netif_stop(tp);
7377                 irq_sync = 1;
7378         }
7379
7380         tg3_full_lock(tp, irq_sync);
7381   
7382         tp->rx_pending = ering->rx_pending;
7383
7384         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7385             tp->rx_pending > 63)
7386                 tp->rx_pending = 63;
7387         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7388         tp->tx_pending = ering->tx_pending;
7389
7390         if (netif_running(dev)) {
7391                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7392                 tg3_init_hw(tp);
7393                 tg3_netif_start(tp);
7394         }
7395
7396         tg3_full_unlock(tp);
7397   
7398         return 0;
7399 }
7400   
7401 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7402 {
7403         struct tg3 *tp = netdev_priv(dev);
7404   
7405         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7406         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7407         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7408 }
7409   
7410 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7411 {
7412         struct tg3 *tp = netdev_priv(dev);
7413         int irq_sync = 0;
7414   
7415         if (netif_running(dev)) {
7416                 tg3_netif_stop(tp);
7417                 irq_sync = 1;
7418         }
7419
7420         tg3_full_lock(tp, irq_sync);
7421
7422         if (epause->autoneg)
7423                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7424         else
7425                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7426         if (epause->rx_pause)
7427                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7428         else
7429                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7430         if (epause->tx_pause)
7431                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7432         else
7433                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7434
7435         if (netif_running(dev)) {
7436                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7437                 tg3_init_hw(tp);
7438                 tg3_netif_start(tp);
7439         }
7440
7441         tg3_full_unlock(tp);
7442   
7443         return 0;
7444 }
7445   
7446 static u32 tg3_get_rx_csum(struct net_device *dev)
7447 {
7448         struct tg3 *tp = netdev_priv(dev);
7449         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7450 }
7451   
7452 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7453 {
7454         struct tg3 *tp = netdev_priv(dev);
7455   
7456         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7457                 if (data != 0)
7458                         return -EINVAL;
7459                 return 0;
7460         }
7461   
7462         spin_lock_bh(&tp->lock);
7463         if (data)
7464                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7465         else
7466                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7467         spin_unlock_bh(&tp->lock);
7468   
7469         return 0;
7470 }
7471   
7472 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7473 {
7474         struct tg3 *tp = netdev_priv(dev);
7475   
7476         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7477                 if (data != 0)
7478                         return -EINVAL;
7479                 return 0;
7480         }
7481   
7482         if (data)
7483                 dev->features |= NETIF_F_IP_CSUM;
7484         else
7485                 dev->features &= ~NETIF_F_IP_CSUM;
7486
7487         return 0;
7488 }
7489
7490 static int tg3_get_stats_count (struct net_device *dev)
7491 {
7492         return TG3_NUM_STATS;
7493 }
7494
7495 static int tg3_get_test_count (struct net_device *dev)
7496 {
7497         return TG3_NUM_TEST;
7498 }
7499
7500 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7501 {
7502         switch (stringset) {
7503         case ETH_SS_STATS:
7504                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7505                 break;
7506         case ETH_SS_TEST:
7507                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7508                 break;
7509         default:
7510                 WARN_ON(1);     /* we need a WARN() */
7511                 break;
7512         }
7513 }
7514
7515 static void tg3_get_ethtool_stats (struct net_device *dev,
7516                                    struct ethtool_stats *estats, u64 *tmp_stats)
7517 {
7518         struct tg3 *tp = netdev_priv(dev);
7519         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7520 }
7521
7522 #define NVRAM_TEST_SIZE 0x100
7523
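/* NVRAM self-test: read the first 256 bytes, check the magic word, then
 * verify the bootstrap checksum at offset 0x10 (CRC-32 of bytes 0x00-0x0f)
 * and the manufacturing-block checksum at offset 0xfc (CRC-32 of bytes
 * 0x74-0xfb).
 */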
7524 static int tg3_test_nvram(struct tg3 *tp)
7525 {
7526         u32 *buf, csum;
7527         int i, j, err = 0;
7528
7529         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7530         if (buf == NULL)
7531                 return -ENOMEM;
7532
7533         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7534                 u32 val;
7535
7536                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7537                         break;
7538                 buf[j] = cpu_to_le32(val);
7539         }
7540         if (i < NVRAM_TEST_SIZE)
7541                 goto out;
7542
7543         err = -EIO;
7544         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7545                 goto out;
7546
7547         /* Bootstrap checksum at offset 0x10 */
7548         csum = calc_crc((unsigned char *) buf, 0x10);
7549         if (csum != cpu_to_le32(buf[0x10/4]))
7550                 goto out;
7551
7552         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7553         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7554         if (csum != cpu_to_le32(buf[0xfc/4]))
7555                 goto out;
7556
7557         err = 0;
7558
7559 out:
7560         kfree(buf);
7561         return err;
7562 }
7563
7564 #define TG3_SERDES_TIMEOUT_SEC  2
7565 #define TG3_COPPER_TIMEOUT_SEC  6
7566
7567 static int tg3_test_link(struct tg3 *tp)
7568 {
7569         int i, max;
7570
7571         if (!netif_running(tp->dev))
7572                 return -ENODEV;
7573
7574         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7575                 max = TG3_SERDES_TIMEOUT_SEC;
7576         else
7577                 max = TG3_COPPER_TIMEOUT_SEC;
7578
7579         for (i = 0; i < max; i++) {
7580                 if (netif_carrier_ok(tp->dev))
7581                         return 0;
7582
7583                 if (msleep_interruptible(1000))
7584                         break;
7585         }
7586
7587         return -EIO;
7588 }
7589
7590 /* Only test the commonly used registers */
7591 static int tg3_test_registers(struct tg3 *tp)
7592 {
7593         int i, is_5705;
7594         u32 offset, read_mask, write_mask, val, save_val, read_val;
7595         static struct {
7596                 u16 offset;
7597                 u16 flags;
7598 #define TG3_FL_5705     0x1
7599 #define TG3_FL_NOT_5705 0x2
7600 #define TG3_FL_NOT_5788 0x4
7601                 u32 read_mask;
7602                 u32 write_mask;
7603         } reg_tbl[] = {
7604                 /* MAC Control Registers */
7605                 { MAC_MODE, TG3_FL_NOT_5705,
7606                         0x00000000, 0x00ef6f8c },
7607                 { MAC_MODE, TG3_FL_5705,
7608                         0x00000000, 0x01ef6b8c },
7609                 { MAC_STATUS, TG3_FL_NOT_5705,
7610                         0x03800107, 0x00000000 },
7611                 { MAC_STATUS, TG3_FL_5705,
7612                         0x03800100, 0x00000000 },
7613                 { MAC_ADDR_0_HIGH, 0x0000,
7614                         0x00000000, 0x0000ffff },
7615                 { MAC_ADDR_0_LOW, 0x0000,
7616                         0x00000000, 0xffffffff },
7617                 { MAC_RX_MTU_SIZE, 0x0000,
7618                         0x00000000, 0x0000ffff },
7619                 { MAC_TX_MODE, 0x0000,
7620                         0x00000000, 0x00000070 },
7621                 { MAC_TX_LENGTHS, 0x0000,
7622                         0x00000000, 0x00003fff },
7623                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7624                         0x00000000, 0x000007fc },
7625                 { MAC_RX_MODE, TG3_FL_5705,
7626                         0x00000000, 0x000007dc },
7627                 { MAC_HASH_REG_0, 0x0000,
7628                         0x00000000, 0xffffffff },
7629                 { MAC_HASH_REG_1, 0x0000,
7630                         0x00000000, 0xffffffff },
7631                 { MAC_HASH_REG_2, 0x0000,
7632                         0x00000000, 0xffffffff },
7633                 { MAC_HASH_REG_3, 0x0000,
7634                         0x00000000, 0xffffffff },
7635
7636                 /* Receive Data and Receive BD Initiator Control Registers. */
7637                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7638                         0x00000000, 0xffffffff },
7639                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7640                         0x00000000, 0xffffffff },
7641                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7642                         0x00000000, 0x00000003 },
7643                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7644                         0x00000000, 0xffffffff },
7645                 { RCVDBDI_STD_BD+0, 0x0000,
7646                         0x00000000, 0xffffffff },
7647                 { RCVDBDI_STD_BD+4, 0x0000,
7648                         0x00000000, 0xffffffff },
7649                 { RCVDBDI_STD_BD+8, 0x0000,
7650                         0x00000000, 0xffff0002 },
7651                 { RCVDBDI_STD_BD+0xc, 0x0000,
7652                         0x00000000, 0xffffffff },
7653         
7654                 /* Receive BD Initiator Control Registers. */
7655                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7656                         0x00000000, 0xffffffff },
7657                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7658                         0x00000000, 0x000003ff },
7659                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7660                         0x00000000, 0xffffffff },
7661         
7662                 /* Host Coalescing Control Registers. */
7663                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7664                         0x00000000, 0x00000004 },
7665                 { HOSTCC_MODE, TG3_FL_5705,
7666                         0x00000000, 0x000000f6 },
7667                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7668                         0x00000000, 0xffffffff },
7669                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7670                         0x00000000, 0x000003ff },
7671                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7672                         0x00000000, 0xffffffff },
7673                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7674                         0x00000000, 0x000003ff },
7675                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7676                         0x00000000, 0xffffffff },
7677                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7678                         0x00000000, 0x000000ff },
7679                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7680                         0x00000000, 0xffffffff },
7681                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7682                         0x00000000, 0x000000ff },
7683                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7684                         0x00000000, 0xffffffff },
7685                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7686                         0x00000000, 0xffffffff },
7687                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7688                         0x00000000, 0xffffffff },
7689                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7690                         0x00000000, 0x000000ff },
7691                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7692                         0x00000000, 0xffffffff },
7693                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7694                         0x00000000, 0x000000ff },
7695                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7696                         0x00000000, 0xffffffff },
7697                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7698                         0x00000000, 0xffffffff },
7699                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7700                         0x00000000, 0xffffffff },
7701                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7702                         0x00000000, 0xffffffff },
7703                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7704                         0x00000000, 0xffffffff },
7705                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7706                         0xffffffff, 0x00000000 },
7707                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7708                         0xffffffff, 0x00000000 },
7709
7710                 /* Buffer Manager Control Registers. */
7711                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7712                         0x00000000, 0x007fff80 },
7713                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7714                         0x00000000, 0x007fffff },
7715                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7716                         0x00000000, 0x0000003f },
7717                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7718                         0x00000000, 0x000001ff },
7719                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7720                         0x00000000, 0x000001ff },
7721                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7722                         0xffffffff, 0x00000000 },
7723                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7724                         0xffffffff, 0x00000000 },
7725         
7726                 /* Mailbox Registers */
7727                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7728                         0x00000000, 0x000001ff },
7729                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7730                         0x00000000, 0x000001ff },
7731                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7732                         0x00000000, 0x000007ff },
7733                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7734                         0x00000000, 0x000001ff },
7735
7736                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7737         };
7738
7739         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7740                 is_5705 = 1;
7741         else
7742                 is_5705 = 0;
7743
7744         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7745                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7746                         continue;
7747
7748                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7749                         continue;
7750
7751                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7752                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7753                         continue;
7754
7755                 offset = (u32) reg_tbl[i].offset;
7756                 read_mask = reg_tbl[i].read_mask;
7757                 write_mask = reg_tbl[i].write_mask;
7758
7759                 /* Save the original register content */
7760                 save_val = tr32(offset);
7761
7762                 /* Determine the read-only value. */
7763                 read_val = save_val & read_mask;
7764
7765                 /* Write zero to the register, then make sure the read-only bits
7766                  * are not changed and the read/write bits are all zeros.
7767                  */
7768                 tw32(offset, 0);
7769
7770                 val = tr32(offset);
7771
7772                 /* Test the read-only and read/write bits. */
7773                 if (((val & read_mask) != read_val) || (val & write_mask))
7774                         goto out;
7775
7776                 /* Write ones to all the bits defined by RdMask and WrMask, then
7777                  * make sure the read-only bits are not changed and the
7778                  * read/write bits are all ones.
7779                  */
7780                 tw32(offset, read_mask | write_mask);
7781
7782                 val = tr32(offset);
7783
7784                 /* Test the read-only bits. */
7785                 if ((val & read_mask) != read_val)
7786                         goto out;
7787
7788                 /* Test the read/write bits. */
7789                 if ((val & write_mask) != write_mask)
7790                         goto out;
7791
7792                 tw32(offset, save_val);
7793         }
7794
7795         return 0;
7796
7797 out:
7798         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7799         tw32(offset, save_val);
7800         return -EIO;
7801 }
7802
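/* Write each test pattern to every word in the given window of NIC-local
 * memory and read it back, failing with -EIO on the first mismatch.
 */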
7803 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7804 {
7805         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7806         int i;
7807         u32 j;
7808
7809         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7810                 for (j = 0; j < len; j += 4) {
7811                         u32 val;
7812
7813                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7814                         tg3_read_mem(tp, offset + j, &val);
7815                         if (val != test_pattern[i])
7816                                 return -EIO;
7817                 }
7818         }
7819         return 0;
7820 }
7821
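/* Memory self-test: run tg3_do_mem_test() over a table of on-chip memory
 * regions; the 5705+ parts use a different layout than the original 570x.
 */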
7822 static int tg3_test_memory(struct tg3 *tp)
7823 {
7824         static struct mem_entry {
7825                 u32 offset;
7826                 u32 len;
7827         } mem_tbl_570x[] = {
7828                 { 0x00000000, 0x01000},
7829                 { 0x00002000, 0x1c000},
7830                 { 0xffffffff, 0x00000}
7831         }, mem_tbl_5705[] = {
7832                 { 0x00000100, 0x0000c},
7833                 { 0x00000200, 0x00008},
7834                 { 0x00000b50, 0x00400},
7835                 { 0x00004000, 0x00800},
7836                 { 0x00006000, 0x01000},
7837                 { 0x00008000, 0x02000},
7838                 { 0x00010000, 0x0e000},
7839                 { 0xffffffff, 0x00000}
7840         };
7841         struct mem_entry *mem_tbl;
7842         int err = 0;
7843         int i;
7844
7845         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7846                 mem_tbl = mem_tbl_5705;
7847         else
7848                 mem_tbl = mem_tbl_570x;
7849
7850         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7851                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7852                     mem_tbl[i].len)) != 0)
7853                         break;
7854         }
7855
7856         return err;
7857 }
7858
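/* Internal MAC loopback test: place the MAC in GMII internal-loopback mode,
 * send one self-addressed frame, then poll the status block until the frame
 * appears on the standard receive ring and verify its payload byte-for-byte.
 */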
7859 static int tg3_test_loopback(struct tg3 *tp)
7860 {
7861         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7862         u32 desc_idx;
7863         struct sk_buff *skb, *rx_skb;
7864         u8 *tx_data;
7865         dma_addr_t map;
7866         int num_pkts, tx_len, rx_len, i, err;
7867         struct tg3_rx_buffer_desc *desc;
7868
7869         if (!netif_running(tp->dev))
7870                 return -ENODEV;
7871
7872         err = -EIO;
7873
7874         tg3_reset_hw(tp);
7875
7876         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7877                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7878                    MAC_MODE_PORT_MODE_GMII;
7879         tw32(MAC_MODE, mac_mode);
7880
7881         tx_len = 1514;
7882         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
7883         tx_data = skb_put(skb, tx_len);
7884         memcpy(tx_data, tp->dev->dev_addr, 6);
7885         memset(tx_data + 6, 0x0, 8);
7886
7887         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7888
7889         for (i = 14; i < tx_len; i++)
7890                 tx_data[i] = (u8) (i & 0xff);
7891
7892         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7893
7894         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7895              HOSTCC_MODE_NOW);
7896
7897         udelay(10);
7898
7899         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7900
7901         send_idx = 0;
7902         num_pkts = 0;
7903
7904         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7905
7906         send_idx++;
7907         num_pkts++;
7908
7909         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7910         tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7911
7912         udelay(10);
7913
7914         for (i = 0; i < 10; i++) {
7915                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7916                        HOSTCC_MODE_NOW);
7917
7918                 udelay(10);
7919
7920                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7921                 rx_idx = tp->hw_status->idx[0].rx_producer;
7922                 if ((tx_idx == send_idx) &&
7923                     (rx_idx == (rx_start_idx + num_pkts)))
7924                         break;
7925         }
7926
7927         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7928         dev_kfree_skb(skb);
7929
7930         if (tx_idx != send_idx)
7931                 goto out;
7932
7933         if (rx_idx != rx_start_idx + num_pkts)
7934                 goto out;
7935
7936         desc = &tp->rx_rcb[rx_start_idx];
7937         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7938         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7939         if (opaque_key != RXD_OPAQUE_RING_STD)
7940                 goto out;
7941
7942         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7943             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7944                 goto out;
7945
7946         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7947         if (rx_len != tx_len)
7948                 goto out;
7949
7950         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7951
7952         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7953         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7954
7955         for (i = 14; i < tx_len; i++) {
7956                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7957                         goto out;
7958         }
7959         err = 0;
7960
7961         /* tg3_free_rings will unmap and free the rx_skb */
7962 out:
7963         return err;
7964 }
7965
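/* ethtool self-test entry point.  Result slots: data[0] NVRAM, data[1] link,
 * data[2] registers, data[3] memory, data[4] loopback, data[5] interrupt.
 * The last four are offline tests and require halting and re-initializing
 * the hardware.
 */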
7966 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7967                           u64 *data)
7968 {
7969         struct tg3 *tp = netdev_priv(dev);
7970
7971         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7972
7973         if (tg3_test_nvram(tp) != 0) {
7974                 etest->flags |= ETH_TEST_FL_FAILED;
7975                 data[0] = 1;
7976         }
7977         if (tg3_test_link(tp) != 0) {
7978                 etest->flags |= ETH_TEST_FL_FAILED;
7979                 data[1] = 1;
7980         }
7981         if (etest->flags & ETH_TEST_FL_OFFLINE) {
7982                 int irq_sync = 0;
7983
7984                 if (netif_running(dev)) {
7985                         tg3_netif_stop(tp);
7986                         irq_sync = 1;
7987                 }
7988
7989                 tg3_full_lock(tp, irq_sync);
7990
7991                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7992                 tg3_nvram_lock(tp);
7993                 tg3_halt_cpu(tp, RX_CPU_BASE);
7994                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7995                         tg3_halt_cpu(tp, TX_CPU_BASE);
7996                 tg3_nvram_unlock(tp);
7997
7998                 if (tg3_test_registers(tp) != 0) {
7999                         etest->flags |= ETH_TEST_FL_FAILED;
8000                         data[2] = 1;
8001                 }
8002                 if (tg3_test_memory(tp) != 0) {
8003                         etest->flags |= ETH_TEST_FL_FAILED;
8004                         data[3] = 1;
8005                 }
8006                 if (tg3_test_loopback(tp) != 0) {
8007                         etest->flags |= ETH_TEST_FL_FAILED;
8008                         data[4] = 1;
8009                 }
8010
8011                 tg3_full_unlock(tp);
8012
8013                 if (tg3_test_interrupt(tp) != 0) {
8014                         etest->flags |= ETH_TEST_FL_FAILED;
8015                         data[5] = 1;
8016                 }
8017
8018                 tg3_full_lock(tp, 0);
8019
8020                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8021                 if (netif_running(dev)) {
8022                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8023                         tg3_init_hw(tp);
8024                         tg3_netif_start(tp);
8025                 }
8026
8027                 tg3_full_unlock(tp);
8028         }
8029 }
8030
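/* MII ioctls: SIOCGMIIREG/SIOCSMIIREG are routed through tg3_readphy() and
 * tg3_writephy() under tp->lock, and are refused on SerDes boards, which
 * have no PHY.
 */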
8031 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8032 {
8033         struct mii_ioctl_data *data = if_mii(ifr);
8034         struct tg3 *tp = netdev_priv(dev);
8035         int err;
8036
8037         switch(cmd) {
8038         case SIOCGMIIPHY:
8039                 data->phy_id = PHY_ADDR;
8040
8041                 /* fallthru */
8042         case SIOCGMIIREG: {
8043                 u32 mii_regval;
8044
8045                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8046                         break;                  /* We have no PHY */
8047
8048                 spin_lock_bh(&tp->lock);
8049                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8050                 spin_unlock_bh(&tp->lock);
8051
8052                 data->val_out = mii_regval;
8053
8054                 return err;
8055         }
8056
8057         case SIOCSMIIREG:
8058                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8059                         break;                  /* We have no PHY */
8060
8061                 if (!capable(CAP_NET_ADMIN))
8062                         return -EPERM;
8063
8064                 spin_lock_bh(&tp->lock);
8065                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8066                 spin_unlock_bh(&tp->lock);
8067
8068                 return err;
8069
8070         default:
8071                 /* do nothing */
8072                 break;
8073         }
8074         return -EOPNOTSUPP;
8075 }
8076
8077 #if TG3_VLAN_TAG_USED
8078 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8079 {
8080         struct tg3 *tp = netdev_priv(dev);
8081
8082         tg3_full_lock(tp, 0);
8083
8084         tp->vlgrp = grp;
8085
8086         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8087         __tg3_set_rx_mode(dev);
8088
8089         tg3_full_unlock(tp);
8090 }
8091
8092 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8093 {
8094         struct tg3 *tp = netdev_priv(dev);
8095
8096         tg3_full_lock(tp, 0);
8097         if (tp->vlgrp)
8098                 tp->vlgrp->vlan_devices[vid] = NULL;
8099         tg3_full_unlock(tp);
8100 }
8101 #endif
8102
8103 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8104 {
8105         struct tg3 *tp = netdev_priv(dev);
8106
8107         memcpy(ec, &tp->coal, sizeof(*ec));
8108         return 0;
8109 }
8110
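/* Validate the requested coalescing parameters against the chip limits
 * before copying them into tp->coal.  On 5705+ parts the per-irq tick and
 * stats-block limits stay at zero, so non-zero requests are rejected there.
 */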
8111 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8112 {
8113         struct tg3 *tp = netdev_priv(dev);
8114         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8115         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8116
8117         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8118                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8119                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8120                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8121                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8122         }
8123
8124         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8125             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8126             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8127             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8128             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8129             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8130             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8131             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8132             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8133             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8134                 return -EINVAL;
8135
8136         /* No rx interrupts will be generated if both are zero */
8137         if ((ec->rx_coalesce_usecs == 0) &&
8138             (ec->rx_max_coalesced_frames == 0))
8139                 return -EINVAL;
8140
8141         /* No tx interrupts will be generated if both are zero */
8142         if ((ec->tx_coalesce_usecs == 0) &&
8143             (ec->tx_max_coalesced_frames == 0))
8144                 return -EINVAL;
8145
8146         /* Only copy relevant parameters, ignore all others. */
8147         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8148         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8149         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8150         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8151         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8152         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8153         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8154         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8155         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8156
8157         if (netif_running(dev)) {
8158                 tg3_full_lock(tp, 0);
8159                 __tg3_set_coalesce(tp, &tp->coal);
8160                 tg3_full_unlock(tp);
8161         }
8162         return 0;
8163 }
8164
8165 static struct ethtool_ops tg3_ethtool_ops = {
8166         .get_settings           = tg3_get_settings,
8167         .set_settings           = tg3_set_settings,
8168         .get_drvinfo            = tg3_get_drvinfo,
8169         .get_regs_len           = tg3_get_regs_len,
8170         .get_regs               = tg3_get_regs,
8171         .get_wol                = tg3_get_wol,
8172         .set_wol                = tg3_set_wol,
8173         .get_msglevel           = tg3_get_msglevel,
8174         .set_msglevel           = tg3_set_msglevel,
8175         .nway_reset             = tg3_nway_reset,
8176         .get_link               = ethtool_op_get_link,
8177         .get_eeprom_len         = tg3_get_eeprom_len,
8178         .get_eeprom             = tg3_get_eeprom,
8179         .set_eeprom             = tg3_set_eeprom,
8180         .get_ringparam          = tg3_get_ringparam,
8181         .set_ringparam          = tg3_set_ringparam,
8182         .get_pauseparam         = tg3_get_pauseparam,
8183         .set_pauseparam         = tg3_set_pauseparam,
8184         .get_rx_csum            = tg3_get_rx_csum,
8185         .set_rx_csum            = tg3_set_rx_csum,
8186         .get_tx_csum            = ethtool_op_get_tx_csum,
8187         .set_tx_csum            = tg3_set_tx_csum,
8188         .get_sg                 = ethtool_op_get_sg,
8189         .set_sg                 = ethtool_op_set_sg,
8190 #if TG3_TSO_SUPPORT != 0
8191         .get_tso                = ethtool_op_get_tso,
8192         .set_tso                = tg3_set_tso,
8193 #endif
8194         .self_test_count        = tg3_get_test_count,
8195         .self_test              = tg3_self_test,
8196         .get_strings            = tg3_get_strings,
8197         .get_stats_count        = tg3_get_stats_count,
8198         .get_ethtool_stats      = tg3_get_ethtool_stats,
8199         .get_coalesce           = tg3_get_coalesce,
8200         .set_coalesce           = tg3_set_coalesce,
8201 };
8202
8203 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8204 {
8205         u32 cursize, val;
8206
8207         tp->nvram_size = EEPROM_CHIP_SIZE;
8208
8209         if (tg3_nvram_read(tp, 0, &val) != 0)
8210                 return;
8211
8212         if (swab32(val) != TG3_EEPROM_MAGIC)
8213                 return;
8214
8215         /*
8216          * Size the chip by reading offsets at increasing powers of two.
8217          * When we encounter our validation signature, we know the addressing
8218          * has wrapped around, and thus have our chip size.
8219          */
8220         cursize = 0x800;
8221
8222         while (cursize < tp->nvram_size) {
8223                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8224                         return;
8225
8226                 if (swab32(val) == TG3_EEPROM_MAGIC)
8227                         break;
8228
8229                 cursize <<= 1;
8230         }
8231
8232         tp->nvram_size = cursize;
8233 }
8234
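/* The NVRAM size in KB is kept in the upper 16 bits of the word at offset
 * 0xf0; if that reads back as zero, fall back to assuming a 128KB part.
 */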
8235 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8236 {
8237         u32 val;
8238
8239         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8240                 if (val != 0) {
8241                         tp->nvram_size = (val >> 16) * 1024;
8242                         return;
8243                 }
8244         }
8245         tp->nvram_size = 0x20000;
8246 }
8247
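/* Decode the NVRAM strapping in NVRAM_CFG1 to determine the flash/EEPROM
 * vendor, the page size, and whether the part is buffered.
 */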
8248 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8249 {
8250         u32 nvcfg1;
8251
8252         nvcfg1 = tr32(NVRAM_CFG1);
8253         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8254                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8255         }
8256         else {
8257                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8258                 tw32(NVRAM_CFG1, nvcfg1);
8259         }
8260
8261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8262                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8263                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8264                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8265                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8266                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8267                                 break;
8268                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8269                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8270                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8271                                 break;
8272                         case FLASH_VENDOR_ATMEL_EEPROM:
8273                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8274                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8275                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8276                                 break;
8277                         case FLASH_VENDOR_ST:
8278                                 tp->nvram_jedecnum = JEDEC_ST;
8279                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8280                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8281                                 break;
8282                         case FLASH_VENDOR_SAIFUN:
8283                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8284                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8285                                 break;
8286                         case FLASH_VENDOR_SST_SMALL:
8287                         case FLASH_VENDOR_SST_LARGE:
8288                                 tp->nvram_jedecnum = JEDEC_SST;
8289                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8290                                 break;
8291                 }
8292         }
8293         else {
8294                 tp->nvram_jedecnum = JEDEC_ATMEL;
8295                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8296                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8297         }
8298 }
8299
8300 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8301 {
8302         u32 nvcfg1;
8303
8304         nvcfg1 = tr32(NVRAM_CFG1);
8305
8306         /* NVRAM protection for TPM */
8307         if (nvcfg1 & (1 << 27))
8308                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8309
8310         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8311                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8312                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8313                         tp->nvram_jedecnum = JEDEC_ATMEL;
8314                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8315                         break;
8316                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8317                         tp->nvram_jedecnum = JEDEC_ATMEL;
8318                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8319                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8320                         break;
8321                 case FLASH_5752VENDOR_ST_M45PE10:
8322                 case FLASH_5752VENDOR_ST_M45PE20:
8323                 case FLASH_5752VENDOR_ST_M45PE40:
8324                         tp->nvram_jedecnum = JEDEC_ST;
8325                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8326                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8327                         break;
8328         }
8329
8330         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8331                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8332                         case FLASH_5752PAGE_SIZE_256:
8333                                 tp->nvram_pagesize = 256;
8334                                 break;
8335                         case FLASH_5752PAGE_SIZE_512:
8336                                 tp->nvram_pagesize = 512;
8337                                 break;
8338                         case FLASH_5752PAGE_SIZE_1K:
8339                                 tp->nvram_pagesize = 1024;
8340                                 break;
8341                         case FLASH_5752PAGE_SIZE_2K:
8342                                 tp->nvram_pagesize = 2048;
8343                                 break;
8344                         case FLASH_5752PAGE_SIZE_4K:
8345                                 tp->nvram_pagesize = 4096;
8346                                 break;
8347                         case FLASH_5752PAGE_SIZE_264:
8348                                 tp->nvram_pagesize = 264;
8349                                 break;
8350                 }
8351         }
8352         else {
8353                 /* For eeprom, set pagesize to maximum eeprom size */
8354                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8355
8356                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8357                 tw32(NVRAM_CFG1, nvcfg1);
8358         }
8359 }
8360
8361 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8362 static void __devinit tg3_nvram_init(struct tg3 *tp)
8363 {
8364         int j;
8365
8366         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8367                 return;
8368
8369         tw32_f(GRC_EEPROM_ADDR,
8370              (EEPROM_ADDR_FSM_RESET |
8371               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8372                EEPROM_ADDR_CLKPERD_SHIFT)));
8373
8374         /* XXX schedule_timeout() ... */
8375         for (j = 0; j < 100; j++)
8376                 udelay(10);
8377
8378         /* Enable seeprom accesses. */
8379         tw32_f(GRC_LOCAL_CTRL,
8380              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8381         udelay(100);
8382
8383         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8384             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8385                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8386
8387                 tg3_enable_nvram_access(tp);
8388
8389                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8390                         tg3_get_5752_nvram_info(tp);
8391                 else
8392                         tg3_get_nvram_info(tp);
8393
8394                 tg3_get_nvram_size(tp);
8395
8396                 tg3_disable_nvram_access(tp);
8397
8398         } else {
8399                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8400
8401                 tg3_get_eeprom_size(tp);
8402         }
8403 }
8404
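/* EEPROM fallback path for chips without an NVRAM interface: program the
 * GRC EEPROM engine with the address and READ/START bits, poll for
 * COMPLETE, then pick the word up from GRC_EEPROM_DATA.
 */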
8405 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8406                                         u32 offset, u32 *val)
8407 {
8408         u32 tmp;
8409         int i;
8410
8411         if (offset > EEPROM_ADDR_ADDR_MASK ||
8412             (offset % 4) != 0)
8413                 return -EINVAL;
8414
8415         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8416                                         EEPROM_ADDR_DEVID_MASK |
8417                                         EEPROM_ADDR_READ);
8418         tw32(GRC_EEPROM_ADDR,
8419              tmp |
8420              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8421              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8422               EEPROM_ADDR_ADDR_MASK) |
8423              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8424
8425         for (i = 0; i < 10000; i++) {
8426                 tmp = tr32(GRC_EEPROM_ADDR);
8427
8428                 if (tmp & EEPROM_ADDR_COMPLETE)
8429                         break;
8430                 udelay(100);
8431         }
8432         if (!(tmp & EEPROM_ADDR_COMPLETE))
8433                 return -EBUSY;
8434
8435         *val = tr32(GRC_EEPROM_DATA);
8436         return 0;
8437 }
8438
8439 #define NVRAM_CMD_TIMEOUT 10000
8440
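/* Issue a command to the NVRAM engine and poll for NVRAM_CMD_DONE,
 * giving up after NVRAM_CMD_TIMEOUT polls roughly 10us apart.
 */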
8441 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8442 {
8443         int i;
8444
8445         tw32(NVRAM_CMD, nvram_cmd);
8446         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8447                 udelay(10);
8448                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8449                         udelay(10);
8450                         break;
8451                 }
8452         }
8453         if (i == NVRAM_CMD_TIMEOUT) {
8454                 return -EBUSY;
8455         }
8456         return 0;
8457 }
8458
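/* Read one 32-bit word from NVRAM.  Buffered Atmel flash uses a page/offset
 * address encoding, so the linear offset is translated first; the NVRAM lock
 * and access enable are held only for the duration of the command.
 */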
8459 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8460 {
8461         int ret;
8462
8463         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8464                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8465                 return -EINVAL;
8466         }
8467
8468         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8469                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8470
8471         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8472                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8473                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8474
8475                 offset = ((offset / tp->nvram_pagesize) <<
8476                           ATMEL_AT45DB0X1B_PAGE_POS) +
8477                         (offset % tp->nvram_pagesize);
8478         }
8479
8480         if (offset > NVRAM_ADDR_MSK)
8481                 return -EINVAL;
8482
8483         tg3_nvram_lock(tp);
8484
8485         tg3_enable_nvram_access(tp);
8486
8487         tw32(NVRAM_ADDR, offset);
8488         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8489                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8490
8491         if (ret == 0)
8492                 *val = swab32(tr32(NVRAM_RDDATA));
8493
8494         tg3_nvram_unlock(tp);
8495
8496         tg3_disable_nvram_access(tp);
8497
8498         return ret;
8499 }
8500
8501 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8502                                     u32 offset, u32 len, u8 *buf)
8503 {
8504         int i, j, rc = 0;
8505         u32 val;
8506
8507         for (i = 0; i < len; i += 4) {
8508                 u32 addr, data;
8509
8510                 addr = offset + i;
8511
8512                 memcpy(&data, buf + i, 4);
8513
8514                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8515
8516                 val = tr32(GRC_EEPROM_ADDR);
8517                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8518
8519                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8520                         EEPROM_ADDR_READ);
8521                 tw32(GRC_EEPROM_ADDR, val |
8522                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8523                         (addr & EEPROM_ADDR_ADDR_MASK) |
8524                         EEPROM_ADDR_START |
8525                         EEPROM_ADDR_WRITE);
8526
8527                 for (j = 0; j < 10000; j++) {
8528                         val = tr32(GRC_EEPROM_ADDR);
8529
8530                         if (val & EEPROM_ADDR_COMPLETE)
8531                                 break;
8532                         udelay(100);
8533                 }
8534                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8535                         rc = -EBUSY;
8536                         break;
8537                 }
8538         }
8539
8540         return rc;
8541 }
8542
8543 /* offset and length are dword aligned */
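/* Unbuffered flash must be rewritten a full page at a time: read the page
 * into a scratch buffer, merge in the new data, erase the page, then program
 * it back word by word, with a write-enable command before the erase and
 * again before programming.
 */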
8544 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8545                 u8 *buf)
8546 {
8547         int ret = 0;
8548         u32 pagesize = tp->nvram_pagesize;
8549         u32 pagemask = pagesize - 1;
8550         u32 nvram_cmd;
8551         u8 *tmp;
8552
8553         tmp = kmalloc(pagesize, GFP_KERNEL);
8554         if (tmp == NULL)
8555                 return -ENOMEM;
8556
8557         while (len) {
8558                 int j;
8559                 u32 phy_addr, page_off, size;
8560
8561                 phy_addr = offset & ~pagemask;
8562
8563                 for (j = 0; j < pagesize; j += 4) {
8564                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8565                                                 (u32 *) (tmp + j))))
8566                                 break;
8567                 }
8568                 if (ret)
8569                         break;
8570
8571                 page_off = offset & pagemask;
8572                 size = pagesize;
8573                 if (len < size)
8574                         size = len;
8575
8576                 len -= size;
8577
8578                 memcpy(tmp + page_off, buf, size);
8579
8580                 offset = offset + (pagesize - page_off);
8581
8582                 tg3_enable_nvram_access(tp);
8583
8584                 /*
8585                  * Before we can erase the flash page, we need
8586                  * to issue a special "write enable" command.
8587                  */
8588                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8589
8590                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8591                         break;
8592
8593                 /* Erase the target page */
8594                 tw32(NVRAM_ADDR, phy_addr);
8595
8596                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8597                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8598
8599                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8600                         break;
8601
8602                 /* Issue another write enable to start the write. */
8603                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8604
8605                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8606                         break;
8607
8608                 for (j = 0; j < pagesize; j += 4) {
8609                         u32 data;
8610
8611                         data = *((u32 *) (tmp + j));
8612                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8613
8614                         tw32(NVRAM_ADDR, phy_addr + j);
8615
8616                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8617                                 NVRAM_CMD_WR;
8618
8619                         if (j == 0)
8620                                 nvram_cmd |= NVRAM_CMD_FIRST;
8621                         else if (j == (pagesize - 4))
8622                                 nvram_cmd |= NVRAM_CMD_LAST;
8623
8624                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8625                                 break;
8626                 }
8627                 if (ret)
8628                         break;
8629         }
8630
8631         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8632         tg3_nvram_exec_cmd(tp, nvram_cmd);
8633
8634         kfree(tmp);
8635
8636         return ret;
8637 }
8638
8639 /* offset and length are dword aligned */
8640 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8641                 u8 *buf)
8642 {
8643         int i, ret = 0;
8644
8645         for (i = 0; i < len; i += 4, offset += 4) {
8646                 u32 data, page_off, phy_addr, nvram_cmd;
8647
8648                 memcpy(&data, buf + i, 4);
8649                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8650
8651                 page_off = offset % tp->nvram_pagesize;
8652
8653                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8654                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8655
8656                         phy_addr = ((offset / tp->nvram_pagesize) <<
8657                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8658                 }
8659                 else {
8660                         phy_addr = offset;
8661                 }
8662
8663                 tw32(NVRAM_ADDR, phy_addr);
8664
8665                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8666
8667                 if ((page_off == 0) || (i == 0))
8668                         nvram_cmd |= NVRAM_CMD_FIRST;
8669                 else if (page_off == (tp->nvram_pagesize - 4))
8670                         nvram_cmd |= NVRAM_CMD_LAST;
8671
8672                 if (i == (len - 4))
8673                         nvram_cmd |= NVRAM_CMD_LAST;
8674
8675                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8676                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8677
8678                         if ((ret = tg3_nvram_exec_cmd(tp,
8679                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8680                                 NVRAM_CMD_DONE)))
8681
8682                                 break;
8683                 }
8684                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8685                         /* We always do complete word writes to eeprom. */
8686                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8687                 }
8688
8689                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8690                         break;
8691         }
8692         return ret;
8693 }
8694
8695 /* offset and length are dword aligned */
8696 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8697 {
8698         int ret;
8699
8700         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8701                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8702                 return -EINVAL;
8703         }
8704
8705         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8706                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8707                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8708                 udelay(40);
8709         }
8710
8711         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8712                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8713         }
8714         else {
8715                 u32 grc_mode;
8716
8717                 tg3_nvram_lock(tp);
8718
8719                 tg3_enable_nvram_access(tp);
8720                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8721                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8722                         tw32(NVRAM_WRITE1, 0x406);
8723
8724                 grc_mode = tr32(GRC_MODE);
8725                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8726
8727                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8728                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8729
8730                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8731                                 buf);
8732                 }
8733                 else {
8734                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8735                                 buf);
8736                 }
8737
8738                 grc_mode = tr32(GRC_MODE);
8739                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8740
8741                 tg3_disable_nvram_access(tp);
8742                 tg3_nvram_unlock(tp);
8743         }
8744
8745         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8746                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8747                 udelay(40);
8748         }
8749
8750         return ret;
8751 }
8752
8753 struct subsys_tbl_ent {
8754         u16 subsys_vendor, subsys_devid;
8755         u32 phy_id;
8756 };
8757
8758 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8759         /* Broadcom boards. */
8760         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8761         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8762         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8763         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8764         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8765         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8766         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8767         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8768         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8769         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8770         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8771
8772         /* 3com boards. */
8773         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8774         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8775         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8776         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8777         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8778
8779         /* DELL boards. */
8780         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8781         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8782         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8783         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8784
8785         /* Compaq boards. */
8786         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8787         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8788         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8789         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8790         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8791
8792         /* IBM boards. */
8793         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8794 };
8795
8796 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8797 {
8798         int i;
8799
8800         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8801                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8802                      tp->pdev->subsystem_vendor) &&
8803                     (subsys_id_to_phy_id[i].subsys_devid ==
8804                      tp->pdev->subsystem_device))
8805                         return &subsys_id_to_phy_id[i];
8806         }
8807         return NULL;
8808 }
8809
8810 /* Since this function may be called in D3-hot power state during
8811  * tg3_init_one(), only config cycles are allowed.
8812  */
8813 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8814 {
8815         u32 val;
8816
8817         /* Make sure register accesses (indirect or otherwise)
8818          * will function correctly.
8819          */
8820         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8821                                tp->misc_host_ctrl);
8822
8823         tp->phy_id = PHY_ID_INVALID;
8824         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8825
8826         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8827         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8828                 u32 nic_cfg, led_cfg;
8829                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8830                 int eeprom_phy_serdes = 0;
8831
8832                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8833                 tp->nic_sram_data_cfg = nic_cfg;
8834
8835                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8836                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8837                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8838                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8839                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8840                     (ver > 0) && (ver < 0x100))
8841                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8842
8843                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8844                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8845                         eeprom_phy_serdes = 1;
8846
8847                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8848                 if (nic_phy_id != 0) {
8849                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8850                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8851
8852                         eeprom_phy_id  = (id1 >> 16) << 10;
8853                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8854                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8855                 } else
8856                         eeprom_phy_id = 0;
8857
8858                 tp->phy_id = eeprom_phy_id;
8859                 if (eeprom_phy_serdes) {
8860                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8861                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8862                         else
8863                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8864                 }
8865
8866                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8867                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8868                                     SHASTA_EXT_LED_MODE_MASK);
8869                 else
8870                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8871
8872                 switch (led_cfg) {
8873                 default:
8874                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8875                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8876                         break;
8877
8878                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8879                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8880                         break;
8881
8882                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8883                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8884
8885                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8886                          * read on some older 5700/5701 bootcode.
8887                          */
8888                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8889                             ASIC_REV_5700 ||
8890                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8891                             ASIC_REV_5701)
8892                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8893
8894                         break;
8895
8896                 case SHASTA_EXT_LED_SHARED:
8897                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8898                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8899                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8900                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8901                                                  LED_CTRL_MODE_PHY_2);
8902                         break;
8903
8904                 case SHASTA_EXT_LED_MAC:
8905                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8906                         break;
8907
8908                 case SHASTA_EXT_LED_COMBO:
8909                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8910                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8911                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8912                                                  LED_CTRL_MODE_PHY_2);
8913                         break;
8914
8915                 };
8916
8917                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8918                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8919                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8920                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8921
8922                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8923                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8924                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8925                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8926
8927                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8928                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8929                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8930                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8931                 }
8932                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8933                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8934
8935                 if (cfg2 & (1 << 17))
8936                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8937
8938                 /* SerDes signal pre-emphasis in register 0x590 is set by
8939                  * the bootcode if bit 18 is set. */
8940                 if (cfg2 & (1 << 18))
8941                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8942         }
8943 }
8944
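/* Work out the PHY ID, preferring what the PHY itself reports unless ASF
 * firmware owns it; otherwise fall back to the ID recorded by
 * tg3_get_eeprom_hw_cfg() or to the subsystem-ID table, then bring copper
 * PHYs into a sane autonegotiation state.
 */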
8945 static int __devinit tg3_phy_probe(struct tg3 *tp)
8946 {
8947         u32 hw_phy_id_1, hw_phy_id_2;
8948         u32 hw_phy_id, hw_phy_id_masked;
8949         int err;
8950
8951         /* Reading the PHY ID register can conflict with ASF
8952          * firmware access to the PHY hardware.
8953          */
8954         err = 0;
8955         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8956                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8957         } else {
8958                 /* Now read the physical PHY_ID from the chip and verify
8959                  * that it is sane.  If it doesn't look good, we fall back
8960                  * to the PHY ID found in the eeprom area, and failing
8961                  * that, to the hard-coded subsystem-ID table.
8962                  */
8963                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8964                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8965
8966                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8967                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8968                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8969
8970                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8971         }
8972
8973         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8974                 tp->phy_id = hw_phy_id;
8975                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8976                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8977                 else
8978                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
8979         } else {
8980                 if (tp->phy_id != PHY_ID_INVALID) {
8981                         /* Do nothing, phy ID already set up in
8982                          * tg3_get_eeprom_hw_cfg().
8983                          */
8984                 } else {
8985                         struct subsys_tbl_ent *p;
8986
8987                         /* No eeprom signature?  Try the hardcoded
8988                          * subsys device table.
8989                          */
8990                         p = lookup_by_subsys(tp);
8991                         if (!p)
8992                                 return -ENODEV;
8993
8994                         tp->phy_id = p->phy_id;
8995                         if (!tp->phy_id ||
8996                             tp->phy_id == PHY_ID_BCM8002)
8997                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8998                 }
8999         }
9000
9001         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9002             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9003                 u32 bmsr, adv_reg, tg3_ctrl;
9004
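                     /* BMSR latches link-down events, so read it twice:
                      * the second read reflects the current link state.
                      */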
9005                 tg3_readphy(tp, MII_BMSR, &bmsr);
9006                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9007                     (bmsr & BMSR_LSTATUS))
9008                         goto skip_phy_reset;
9009
9010                 err = tg3_phy_reset(tp);
9011                 if (err)
9012                         return err;
9013
9014                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9015                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9016                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9017                 tg3_ctrl = 0;
9018                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9019                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9020                                     MII_TG3_CTRL_ADV_1000_FULL);
9021                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9022                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9023                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9024                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9025                 }
9026
9027                 if (!tg3_copper_is_advertising_all(tp)) {
9028                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9029
9030                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9031                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9032
9033                         tg3_writephy(tp, MII_BMCR,
9034                                      BMCR_ANENABLE | BMCR_ANRESTART);
9035                 }
9036                 tg3_phy_set_wirespeed(tp);
9037
9038                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9039                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9040                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9041         }
9042
9043 skip_phy_reset:
9044         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9045                 err = tg3_init_5401phy_dsp(tp);
9046                 if (err)
9047                         return err;
9048         }
9049
9050         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9051                 err = tg3_init_5401phy_dsp(tp);
9052         }
9053
9054         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9055                 tp->link_config.advertising =
9056                         (ADVERTISED_1000baseT_Half |
9057                          ADVERTISED_1000baseT_Full |
9058                          ADVERTISED_Autoneg |
9059                          ADVERTISED_FIBRE);
9060         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9061                 tp->link_config.advertising &=
9062                         ~(ADVERTISED_1000baseT_Half |
9063                           ADVERTISED_1000baseT_Full);
9064
9065         return err;
9066 }
9067
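/* Pull the 256-byte VPD image from NVRAM at offset 0x100 and walk the
 * read-only descriptor looking for the 'PN' keyword to recover the board
 * part number; anything unexpected falls through to "none".
 */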
9068 static void __devinit tg3_read_partno(struct tg3 *tp)
9069 {
9070         unsigned char vpd_data[256];
9071         int i;
9072
9073         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9074                 /* Sun decided not to put the necessary bits in the
9075                  * NVRAM of their onboard tg3 parts :(
9076                  */
9077                 strcpy(tp->board_part_number, "Sun 570X");
9078                 return;
9079         }
9080
9081         for (i = 0; i < 256; i += 4) {
9082                 u32 tmp;
9083
9084                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9085                         goto out_not_found;
9086
9087                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9088                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9089                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9090                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9091         }
9092
9093         /* Now parse and find the part number. */
9094         for (i = 0; i < 256; ) {
9095                 unsigned char val = vpd_data[i];
9096                 int block_end;
9097
9098                 if (val == 0x82 || val == 0x91) {
9099                         i = (i + 3 +
9100                              (vpd_data[i + 1] +
9101                               (vpd_data[i + 2] << 8)));
9102                         continue;
9103                 }
9104
9105                 if (val != 0x90)
9106                         goto out_not_found;
9107
9108                 block_end = (i + 3 +
9109                              (vpd_data[i + 1] +
9110                               (vpd_data[i + 2] << 8)));
9111                 i += 3;
9112                 while (i < block_end) {
9113                         if (vpd_data[i + 0] == 'P' &&
9114                             vpd_data[i + 1] == 'N') {
9115                                 int partno_len = vpd_data[i + 2];
9116
9117                                 if (partno_len > 24)
9118                                         goto out_not_found;
9119
9120                                 memcpy(tp->board_part_number,
9121                                        &vpd_data[i + 3],
9122                                        partno_len);
9123
9124                                 /* Success. */
9125                                 return;
9126                         }
                             /* Not the 'PN' keyword; skip its 3-byte header plus data,
                              * otherwise this loop never advances.
                              */
                             i += 3 + vpd_data[i + 2];
9127                 }
9128
9129                 /* Part number not found. */
9130                 goto out_not_found;
9131         }
9132
9133 out_not_found:
9134         strcpy(tp->board_part_number, "none");
9135 }
9136
9137 #ifdef CONFIG_SPARC64
9138 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9139 {
9140         struct pci_dev *pdev = tp->pdev;
9141         struct pcidev_cookie *pcp = pdev->sysdata;
9142
9143         if (pcp != NULL) {
9144                 int node = pcp->prom_node;
9145                 u32 venid;
9146                 int err;
9147
9148                 err = prom_getproperty(node, "subsystem-vendor-id",
9149                                        (char *) &venid, sizeof(venid));
9150                 if (err == 0 || err == -1)
9151                         return 0;
9152                 if (venid == PCI_VENDOR_ID_SUN)
9153                         return 1;
9154         }
9155         return 0;
9156 }
9157 #endif
9158
9159 static int __devinit tg3_get_invariants(struct tg3 *tp)
9160 {
9161         static struct pci_device_id write_reorder_chipsets[] = {
9162                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9163                              PCI_DEVICE_ID_INTEL_82801AA_8) },
9164                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9165                              PCI_DEVICE_ID_INTEL_82801AB_8) },
9166                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9167                              PCI_DEVICE_ID_INTEL_82801BA_11) },
9168                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9169                              PCI_DEVICE_ID_INTEL_82801BA_6) },
9170                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9171                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9172                 { },
9173         };
9174         u32 misc_ctrl_reg;
9175         u32 cacheline_sz_reg;
9176         u32 pci_state_reg, grc_misc_cfg;
9177         u32 val;
9178         u16 pci_cmd;
9179         int err;
9180
9181 #ifdef CONFIG_SPARC64
9182         if (tg3_is_sun_570X(tp))
9183                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9184 #endif
9185
9186         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
9187          * reordering to the mailbox registers done by the host
9188          * controller can cause major troubles.  We read back from
9189          * every mailbox register write to force the writes to be
9190          * posted to the chip in order.
9191          */
9192         if (pci_dev_present(write_reorder_chipsets))
9193                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9194
9195         /* Force memory write invalidate off.  If we leave it on,
9196          * then on 5700_BX chips we have to enable a workaround.
9197          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9198          * to match the cacheline size.  The Broadcom driver has this
9199          * workaround but turns MWI off all the time and so never uses
9200          * it.  This seems to suggest that the workaround is insufficient.
9201          */
9202         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9203         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9204         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9205
9206         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9207          * has the register indirect write enable bit set before
9208          * we try to access any of the MMIO registers.  It is also
9209          * critical that the PCI-X hw workaround situation is decided
9210          * before that as well.
9211          */
9212         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9213                               &misc_ctrl_reg);
9214
9215         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9216                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9217
9218         /* Wrong chip ID in 5752 A0. This code can be removed later
9219          * as A0 is not in production.
9220          */
9221         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9222                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9223
9224         /* Find msi capability. */
9225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9226                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9227
9228         /* Initialize misc host control in PCI block. */
9229         tp->misc_host_ctrl |= (misc_ctrl_reg &
9230                                MISC_HOST_CTRL_CHIPREV);
9231         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9232                                tp->misc_host_ctrl);
9233
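        /* TG3PCI_CACHELINESZ mirrors the standard PCI config dword at
         * offset 0x0c: cache line size, latency timer, header type and
         * BIST, one byte each from least to most significant.
         */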
9234         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9235                               &cacheline_sz_reg);
9236
9237         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9238         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9239         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9240         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9241
9242         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9243             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9244             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9245                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9246
9247         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9248             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9249                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9250
9251         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9252                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9253
9254         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9255             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9256             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9257                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9258
9259         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9260                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9261
9262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9263             tp->pci_lat_timer < 64) {
9264                 tp->pci_lat_timer = 64;
9265
9266                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9267                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9268                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9269                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9270
9271                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9272                                        cacheline_sz_reg);
9273         }
9274
9275         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9276                               &pci_state_reg);
9277
9278         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9279                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9280
9281                 /* If this is a 5700 BX chipset, and we are in PCI-X
9282                  * mode, enable register write workaround.
9283                  *
9284                  * The workaround is to use indirect register accesses
9285                  * for all chip writes not to mailbox registers.
9286                  */
9287                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9288                         u32 pm_reg;
9289                         u16 pci_cmd;
9290
9291                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9292
9293                         /* The chip can have its power management PCI config
9294                          * space registers clobbered due to this bug.
9295                          * So explicitly force the chip into D0 here.
9296                          */
9297                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9298                                               &pm_reg);
9299                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9300                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9301                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9302                                                pm_reg);
9303
9304                         /* Also, force SERR#/PERR# in PCI command. */
9305                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9306                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9307                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9308                 }
9309         }
9310
9311         /* Back to back register writes can cause problems on this chip,
9312          * the workaround is to read back all reg writes except those to
9313          * mailbox regs.  See tg3_write_indirect_reg32().
9314          *
9315          * PCI Express 5750_A0 rev chips need this workaround too.
9316          */
9317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9318             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9319              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9320                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9321
9322         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9323                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9324         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9325                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9326
9327         /* Chip-specific fixup from Broadcom driver */
9328         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9329             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9330                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9331                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9332         }
9333
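        /* Default register accessors: reads and the mailbox writes use
         * plain MMIO, while ordinary register writes go through
         * tg3_write_indirect_reg32, presumably the config-space indirect
         * path that copes with the write-reordering and PCI-X target
         * quirks detected above.
         */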
9334         tp->read32 = tg3_read32;
9335         tp->write32 = tg3_write_indirect_reg32;
9336         tp->write32_mbox = tg3_write32;
9337         tp->write32_tx_mbox = tg3_write32_tx_mbox;
9338         tp->write32_rx_mbox = tg3_write32_rx_mbox;
9339
9340         /* Get eeprom hw config before calling tg3_set_power_state().
9341          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9342          * determined before calling tg3_set_power_state() so that
9343          * we know whether or not to switch out of Vaux power.
9344          * When the flag is set, it means that GPIO1 is used for eeprom
9345          * write protect and also implies that it is a LOM where GPIOs
9346          * are not used to switch power.
9347          */ 
9348         tg3_get_eeprom_hw_cfg(tp);
9349
9350         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9351          * GPIO1 driven high will bring 5700's external PHY out of reset.
9352          * It is also used as eeprom write protect on LOMs.
9353          */
9354         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9355         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9356             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9357                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9358                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9359         /* Unused GPIO3 must be driven as output on 5752 because there
9360          * are no pull-up resistors on unused GPIO pins.
9361          */
9362         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9363                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9364
9365         /* Force the chip into D0. */
9366         err = tg3_set_power_state(tp, 0);
9367         if (err) {
9368                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9369                        pci_name(tp->pdev));
9370                 return err;
9371         }
9372
9373         /* 5700 B0 chips do not support checksumming correctly due
9374          * to hardware bugs.
9375          */
9376         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9377                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9378
9379         /* Pseudo-header checksum is done by hardware logic and not
9380          * the offload processors, so make the chip do the pseudo-
9381          * header checksums on receive.  For transmit it is more
9382          * convenient to do the pseudo-header checksum in software
9383          * as Linux does that on transmit for us in all cases.
9384          */
9385         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9386         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9387
9388         /* Derive initial jumbo mode from MTU assigned in
9389          * ether_setup() via the alloc_etherdev() call
9390          */
9391         if (tp->dev->mtu > ETH_DATA_LEN &&
9392             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9393                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9394
9395         /* Determine WakeOnLan speed to use. */
9396         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9397             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9398             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9399             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9400                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9401         } else {
9402                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9403         }
9404
9405         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
9406         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9407             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9408              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9409              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9410             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9411                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9412
9413         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9414             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9415                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9416         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9417                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9418
9419         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9420                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9421
9422         tp->coalesce_mode = 0;
9423         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9424             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9425                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9426
9427         /* Initialize MAC MI mode, polling disabled. */
9428         tw32_f(MAC_MI_MODE, tp->mi_mode);
9429         udelay(80);
9430
9431         /* Initialize data/descriptor byte/word swapping. */
9432         val = tr32(GRC_MODE);
9433         val &= GRC_MODE_HOST_STACKUP;
9434         tw32(GRC_MODE, val | tp->grc_mode);
9435
9436         tg3_switch_clocks(tp);
9437
9438         /* Clear this out for sanity. */
9439         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9440
9441         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9442                               &pci_state_reg);
9443         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9444             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9445                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9446
9447                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9448                     chiprevid == CHIPREV_ID_5701_B0 ||
9449                     chiprevid == CHIPREV_ID_5701_B2 ||
9450                     chiprevid == CHIPREV_ID_5701_B5) {
9451                         void __iomem *sram_base;
9452
9453                         /* Write some dummy words into the SRAM status block
9454                          * area and see if it reads back correctly.  If the return
9455                          * value is bad, force enable the PCIX workaround.
9456                          */
9457                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9458
9459                         writel(0x00000000, sram_base);
9460                         writel(0x00000000, sram_base + 4);
9461                         writel(0xffffffff, sram_base + 4);
9462                         if (readl(sram_base) != 0x00000000)
9463                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9464                 }
9465         }
9466
9467         udelay(50);
9468         tg3_nvram_init(tp);
9469
9470         grc_misc_cfg = tr32(GRC_MISC_CFG);
9471         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9472
9473         /* Broadcom's driver says that CIOBE multisplit has a bug */
9474 #if 0
9475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9476             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9477                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9478                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9479         }
9480 #endif
9481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9482             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9483              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9484                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9485
9486         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9487             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9488                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9489         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9490                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9491                                       HOSTCC_MODE_CLRTICK_TXBD);
9492
9493                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9494                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9495                                        tp->misc_host_ctrl);
9496         }
9497
9498         /* these are limited to 10/100 only */
9499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9500              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9501             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9502              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9503              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9504               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9505               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9506             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9507              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9508               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9509                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9510
9511         err = tg3_phy_probe(tp);
9512         if (err) {
9513                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9514                        pci_name(tp->pdev), err);
9515                 /* ... but do not return immediately ... */
9516         }
9517
9518         tg3_read_partno(tp);
9519
9520         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9521                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9522         } else {
9523                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9524                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9525                 else
9526                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9527         }
9528
9529         /* 5700 {AX,BX} chips have a broken status block link
9530          * change bit implementation, so we must use the
9531          * status register in those cases.
9532          */
9533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9534                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9535         else
9536                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9537
9538         /* The led_ctrl is set during tg3_phy_probe; here we might
9539          * have to force the link status polling mechanism based
9540          * upon subsystem IDs.
9541          */
9542         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9543             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9544                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9545                                   TG3_FLAG_USE_LINKCHG_REG);
9546         }
9547
9548         /* For all SERDES we poll the MAC status register. */
9549         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9550                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9551         else
9552                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9553
9554         /* 5700 BX chips need to have their TX producer index mailboxes
9555          * written twice to work around a bug.
9556          */
9557         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9558                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9559         else
9560                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9561
9562         /* It seems all chips can get confused if TX buffers
9563          * straddle the 4GB address boundary in some cases.
9564          */
9565         tp->dev->hard_start_xmit = tg3_start_xmit;
9566
9567         tp->rx_offset = 2;
9568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9569             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9570                 tp->rx_offset = 0;
9571
9572         /* By default, disable wake-on-lan.  User can change this
9573          * using ETHTOOL_SWOL.
9574          */
9575         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9576
9577         return err;
9578 }
9579
9580 #ifdef CONFIG_SPARC64
9581 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9582 {
9583         struct net_device *dev = tp->dev;
9584         struct pci_dev *pdev = tp->pdev;
9585         struct pcidev_cookie *pcp = pdev->sysdata;
9586
9587         if (pcp != NULL) {
9588                 int node = pcp->prom_node;
9589
9590                 if (prom_getproplen(node, "local-mac-address") == 6) {
9591                         prom_getproperty(node, "local-mac-address",
9592                                          dev->dev_addr, 6);
9593                         return 0;
9594                 }
9595         }
9596         return -ENODEV;
9597 }
9598
9599 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9600 {
9601         struct net_device *dev = tp->dev;
9602
9603         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9604         return 0;
9605 }
9606 #endif
9607
9608 static int __devinit tg3_get_device_address(struct tg3 *tp)
9609 {
9610         struct net_device *dev = tp->dev;
9611         u32 hi, lo, mac_offset;
9612
9613 #ifdef CONFIG_SPARC64
9614         if (!tg3_get_macaddr_sparc(tp))
9615                 return 0;
9616 #endif
9617
9618         mac_offset = 0x7c;
9619         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9620              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9621             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9622                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9623                         mac_offset = 0xcc;
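                /* The lock/unlock dance below appears to make sure the
                 * other function of this dual-MAC part is not in the
                 * middle of an NVRAM access: take and immediately drop
                 * the arbitration lock, or reset the NVRAM command state
                 * machine if the lock cannot be obtained.
                 */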
9624                 if (tg3_nvram_lock(tp))
9625                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9626                 else
9627                         tg3_nvram_unlock(tp);
9628         }
9629
9630         /* First try to get it from MAC address mailbox. */
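        /* A populated mailbox is marked by 0x484b ("HK" in ASCII) in the
         * upper half of the high word; the first two address bytes sit
         * in its lower half and the remaining four in the low word.
         */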
9631         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9632         if ((hi >> 16) == 0x484b) {
9633                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9634                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9635
9636                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9637                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9638                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9639                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9640                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9641         }
9642         /* Next, try NVRAM. */
9643         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9644                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9645                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9646                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9647                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9648                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9649                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9650                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9651                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9652         }
9653         /* Finally just fetch it out of the MAC control regs. */
9654         else {
9655                 hi = tr32(MAC_ADDR_0_HIGH);
9656                 lo = tr32(MAC_ADDR_0_LOW);
9657
9658                 dev->dev_addr[5] = lo & 0xff;
9659                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9660                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9661                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9662                 dev->dev_addr[1] = hi & 0xff;
9663                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9664         }
9665
9666         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9667 #ifdef CONFIG_SPARC64
9668                 if (!tg3_get_default_macaddr_sparc(tp))
9669                         return 0;
9670 #endif
9671                 return -EINVAL;
9672         }
9673         return 0;
9674 }
9675
9676 #define BOUNDARY_SINGLE_CACHELINE       1
9677 #define BOUNDARY_MULTI_CACHELINE        2
9678
9679 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9680 {
9681         int cacheline_size;
9682         u8 byte;
9683         int goal;
9684
9685         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9686         if (byte == 0)
9687                 cacheline_size = 1024;
9688         else
9689                 cacheline_size = (int) byte * 4;
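        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
         * multiply by four; a value of zero means firmware never
         * programmed it, so it is treated as 1024 bytes above.
         */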
9690
9691         /* On 5703 and later chips, the boundary bits have no
9692          * effect.
9693          */
9694         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9695             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9696             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9697                 goto out;
9698
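        /* Choose a boundary policy by architecture: PPC64, IA64 and
         * PARISC are set up for multi-cacheline bursts, SPARC64 and
         * ALPHA for single-cacheline bursts, and every other platform
         * (goal == 0) keeps the chip's default boundary settings.
         */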
9699 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9700         goal = BOUNDARY_MULTI_CACHELINE;
9701 #else
9702 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9703         goal = BOUNDARY_SINGLE_CACHELINE;
9704 #else
9705         goal = 0;
9706 #endif
9707 #endif
9708
9709         if (!goal)
9710                 goto out;
9711
9712         /* PCI controllers on most RISC systems tend to disconnect
9713          * when a device tries to burst across a cache-line boundary.
9714          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9715          *
9716          * Unfortunately, for PCI-E there are only limited
9717          * write-side controls for this, and thus for reads
9718          * we will still get the disconnects.  We'll also waste
9719          * these PCI cycles for both read and write for chips
9720          * other than 5700 and 5701 which do not implement the
9721          * boundary bits.
9722          */
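        /* Only the 128-, 256- and 384-byte boundary encodings are used
         * in PCI-X mode below, so the host cache line size is mapped
         * onto the nearest of those values.
         */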
9723         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9724             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9725                 switch (cacheline_size) {
9726                 case 16:
9727                 case 32:
9728                 case 64:
9729                 case 128:
9730                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9731                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9732                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9733                         } else {
9734                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9735                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9736                         }
9737                         break;
9738
9739                 case 256:
9740                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9741                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9742                         break;
9743
9744                 default:
9745                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9746                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9747                         break;
9748                 }
9749         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9750                 switch (cacheline_size) {
9751                 case 16:
9752                 case 32:
9753                 case 64:
9754                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9755                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9756                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9757                                 break;
9758                         }
9759                         /* fallthrough */
9760                 case 128:
9761                 default:
9762                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9763                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9764                         break;
9765                 }
9766         } else {
9767                 switch (cacheline_size) {
9768                 case 16:
9769                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9770                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9771                                         DMA_RWCTRL_WRITE_BNDRY_16);
9772                                 break;
9773                         }
9774                         /* fallthrough */
9775                 case 32:
9776                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9777                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9778                                         DMA_RWCTRL_WRITE_BNDRY_32);
9779                                 break;
9780                         }
9781                         /* fallthrough */
9782                 case 64:
9783                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9784                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9785                                         DMA_RWCTRL_WRITE_BNDRY_64);
9786                                 break;
9787                         }
9788                         /* fallthrough */
9789                 case 128:
9790                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9791                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9792                                         DMA_RWCTRL_WRITE_BNDRY_128);
9793                                 break;
9794                         }
9795                         /* fallthrough */
9796                 case 256:
9797                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9798                                 DMA_RWCTRL_WRITE_BNDRY_256);
9799                         break;
9800                 case 512:
9801                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9802                                 DMA_RWCTRL_WRITE_BNDRY_512);
9803                         break;
9804                 case 1024:
9805                 default:
9806                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9807                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9808                         break;
9809                 }
9810         }
9811
9812 out:
9813         return val;
9814 }
9815
9816 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9817 {
9818         struct tg3_internal_buffer_desc test_desc;
9819         u32 sram_dma_descs;
9820         int i, ret;
9821
9822         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9823
9824         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9825         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9826         tw32(RDMAC_STATUS, 0);
9827         tw32(WDMAC_STATUS, 0);
9828
9829         tw32(BUFMGR_MODE, 0);
9830         tw32(FTQ_RESET, 0);
9831
9832         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9833         test_desc.addr_lo = buf_dma & 0xffffffff;
9834         test_desc.nic_mbuf = 0x00002100;
9835         test_desc.len = size;
9836
9837         /*
9838          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
9839          * the *second* time the tg3 driver was getting loaded after an
9840          * initial scan.
9841          *
9842          * Broadcom tells me:
9843          *   ...the DMA engine is connected to the GRC block and a DMA
9844          *   reset may affect the GRC block in some unpredictable way...
9845          *   The behavior of resets to individual blocks has not been tested.
9846          *
9847          * Broadcom noted the GRC reset will also reset all sub-components.
9848          */
9849         if (to_device) {
9850                 test_desc.cqid_sqid = (13 << 8) | 2;
9851
9852                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9853                 udelay(40);
9854         } else {
9855                 test_desc.cqid_sqid = (16 << 8) | 7;
9856
9857                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9858                 udelay(40);
9859         }
9860         test_desc.flags = 0x00000005;
9861
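        /* Copy the test descriptor into the chip's internal descriptor
         * pool one 32-bit word at a time through the PCI memory window
         * registers in config space, then kick the transfer below by
         * enqueueing the descriptor address on the appropriate FTQ.
         */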
9862         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9863                 u32 val;
9864
9865                 val = *(((u32 *)&test_desc) + i);
9866                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9867                                        sram_dma_descs + (i * sizeof(u32)));
9868                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9869         }
9870         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
9871
9872         if (to_device) {
9873                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9874         } else {
9875                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
9876         }
9877
9878         ret = -ENODEV;
9879         for (i = 0; i < 40; i++) {
9880                 u32 val;
9881
9882                 if (to_device)
9883                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9884                 else
9885                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9886                 if ((val & 0xffff) == sram_dma_descs) {
9887                         ret = 0;
9888                         break;
9889                 }
9890
9891                 udelay(100);
9892         }
9893
9894         return ret;
9895 }
9896
9897 #define TEST_BUFFER_SIZE        0x2000
9898
9899 static int __devinit tg3_test_dma(struct tg3 *tp)
9900 {
9901         dma_addr_t buf_dma;
9902         u32 *buf, saved_dma_rwctrl;
9903         int ret;
9904
9905         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9906         if (!buf) {
9907                 ret = -ENOMEM;
9908                 goto out_nofree;
9909         }
9910
9911         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9912                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9913
9914         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9915
9916         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9917                 /* DMA read watermark not used on PCIE */
9918                 tp->dma_rwctrl |= 0x00180000;
9919         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9920                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9921                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9922                         tp->dma_rwctrl |= 0x003f0000;
9923                 else
9924                         tp->dma_rwctrl |= 0x003f000f;
9925         } else {
9926                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9927                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9928                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9929
9930                         if (ccval == 0x6 || ccval == 0x7)
9931                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9932
9933                         /* Set bit 23 to enable PCIX hw bug fix */
9934                         tp->dma_rwctrl |= 0x009f0000;
9935                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9936                         /* 5780 always in PCIX mode */
9937                         tp->dma_rwctrl |= 0x00144000;
9938                 } else {
9939                         tp->dma_rwctrl |= 0x001b000f;
9940                 }
9941         }
9942
9943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9945                 tp->dma_rwctrl &= 0xfffffff0;
9946
9947         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9949                 /* Remove this if it causes problems for some boards. */
9950                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9951
9952                 /* On 5700/5701 chips, we need to set this bit.
9953                  * Otherwise the chip will issue cacheline transactions
9954                  * to streamable DMA memory with not all the byte
9955                  * enables turned on.  This is an error on several
9956                  * RISC PCI controllers, in particular sparc64.
9957                  *
9958                  * On 5703/5704 chips, this bit has been reassigned
9959                  * a different meaning.  In particular, it is used
9960                  * on those chips to enable a PCI-X workaround.
9961                  */
9962                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9963         }
9964
9965         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9966
9967 #if 0
9968         /* Unneeded, already done by tg3_get_invariants.  */
9969         tg3_switch_clocks(tp);
9970 #endif
9971
9972         ret = 0;
9973         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9974             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9975                 goto out;
9976
9977         /* It is best to perform the DMA test with the maximum write burst size
9978          * to expose the 5700/5701 write DMA bug.
9979          */
9980         saved_dma_rwctrl = tp->dma_rwctrl;
9981         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9982         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9983
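        /* Test loop: fill the buffer with a known pattern, DMA it to
         * the chip, DMA it back and verify.  On corruption, retry with
         * the write boundary forced down to 16 bytes; if that also
         * fails, report the DMA engine as broken.
         */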
9984         while (1) {
9985                 u32 *p = buf, i;
9986
9987                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9988                         p[i] = i;
9989
9990                 /* Send the buffer to the chip. */
9991                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9992                 if (ret) {
9993                         printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err %d\n", ret);
9994                         break;
9995                 }
9996
9997 #if 0
9998                 /* validate data reached card RAM correctly. */
9999                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10000                         u32 val;
10001                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10002                         if (le32_to_cpu(val) != p[i]) {
10003                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10004                                 /* ret = -ENODEV here? */
10005                         }
10006                         p[i] = 0;
10007                 }
10008 #endif
10009                 /* Now read it back. */
10010                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10011                 if (ret) {
10012                         printk(KERN_ERR "tg3_test_dma() read back of test buffer failed, err %d\n", ret);
10013
10014                         break;
10015                 }
10016
10017                 /* Verify it. */
10018                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10019                         if (p[i] == i)
10020                                 continue;
10021
10022                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10023                             DMA_RWCTRL_WRITE_BNDRY_16) {
10024                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10025                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10026                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10027                                 break;
10028                         } else {
10029                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%u != %u)\n", p[i], i);
10030                                 ret = -ENODEV;
10031                                 goto out;
10032                         }
10033                 }
10034
10035                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10036                         /* Success. */
10037                         ret = 0;
10038                         break;
10039                 }
10040         }
10041         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10042             DMA_RWCTRL_WRITE_BNDRY_16) {
10043                 static struct pci_device_id dma_wait_state_chipsets[] = {
10044                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10045                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10046                         { },
10047                 };
10048
10049                 /* DMA test passed without adjusting DMA boundary,
10050                  * now look for chipsets that are known to expose the
10051                  * DMA bug without failing the test.
10052                  */
10053                 if (pci_dev_present(dma_wait_state_chipsets)) {
10054                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10055                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10056                 }
10057                 else
10058                         /* Safe to use the calculated DMA boundary. */
10059                         tp->dma_rwctrl = saved_dma_rwctrl;
10060
10061                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10062         }
10063
10064 out:
10065         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10066 out_nofree:
10067         return ret;
10068 }
10069
10070 static void __devinit tg3_init_link_config(struct tg3 *tp)
10071 {
10072         tp->link_config.advertising =
10073                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10074                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10075                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10076                  ADVERTISED_Autoneg | ADVERTISED_MII);
10077         tp->link_config.speed = SPEED_INVALID;
10078         tp->link_config.duplex = DUPLEX_INVALID;
10079         tp->link_config.autoneg = AUTONEG_ENABLE;
10080         netif_carrier_off(tp->dev);
10081         tp->link_config.active_speed = SPEED_INVALID;
10082         tp->link_config.active_duplex = DUPLEX_INVALID;
10083         tp->link_config.phy_is_low_power = 0;
10084         tp->link_config.orig_speed = SPEED_INVALID;
10085         tp->link_config.orig_duplex = DUPLEX_INVALID;
10086         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10087 }
10088
10089 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10090 {
10091         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10092                 tp->bufmgr_config.mbuf_read_dma_low_water =
10093                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10094                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10095                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10096                 tp->bufmgr_config.mbuf_high_water =
10097                         DEFAULT_MB_HIGH_WATER_5705;
10098
10099                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10100                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10101                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10102                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10103                 tp->bufmgr_config.mbuf_high_water_jumbo =
10104                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10105         } else {
10106                 tp->bufmgr_config.mbuf_read_dma_low_water =
10107                         DEFAULT_MB_RDMA_LOW_WATER;
10108                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10109                         DEFAULT_MB_MACRX_LOW_WATER;
10110                 tp->bufmgr_config.mbuf_high_water =
10111                         DEFAULT_MB_HIGH_WATER;
10112
10113                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10114                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10115                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10116                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10117                 tp->bufmgr_config.mbuf_high_water_jumbo =
10118                         DEFAULT_MB_HIGH_WATER_JUMBO;
10119         }
10120
10121         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10122         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10123 }
10124
10125 static char * __devinit tg3_phy_string(struct tg3 *tp)
10126 {
10127         switch (tp->phy_id & PHY_ID_MASK) {
10128         case PHY_ID_BCM5400:    return "5400";
10129         case PHY_ID_BCM5401:    return "5401";
10130         case PHY_ID_BCM5411:    return "5411";
10131         case PHY_ID_BCM5701:    return "5701";
10132         case PHY_ID_BCM5703:    return "5703";
10133         case PHY_ID_BCM5704:    return "5704";
10134         case PHY_ID_BCM5705:    return "5705";
10135         case PHY_ID_BCM5750:    return "5750";
10136         case PHY_ID_BCM5752:    return "5752";
10137         case PHY_ID_BCM5780:    return "5780";
10138         case PHY_ID_BCM8002:    return "8002/serdes";
10139         case 0:                 return "serdes";
10140         default:                return "unknown";
10141         }
10142 }
10143
10144 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10145 {
10146         struct pci_dev *peer;
10147         unsigned int func, devnr = tp->pdev->devfn & ~7;
10148
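        /* The 5704 is a dual-port device; both MACs appear as functions
         * of the same PCI device, so mask off the function bits of our
         * devfn and scan the other functions in the slot for the peer.
         */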
10149         for (func = 0; func < 8; func++) {
10150                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10151                 if (peer && peer != tp->pdev)
10152                         break;
10153                 pci_dev_put(peer);
10154         }
10155         if (!peer || peer == tp->pdev)
10156                 BUG();
10157
10158         /*
10159          * We don't need to keep the refcount elevated; there's no way
10160          * to remove one half of this device without removing the other
10161          */
10162         pci_dev_put(peer);
10163
10164         return peer;
10165 }
10166
10167 static void __devinit tg3_init_coal(struct tg3 *tp)
10168 {
10169         struct ethtool_coalesce *ec = &tp->coal;
10170
10171         memset(ec, 0, sizeof(*ec));
10172         ec->cmd = ETHTOOL_GCOALESCE;
10173         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10174         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10175         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10176         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10177         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10178         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10179         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10180         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10181         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10182
10183         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10184                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10185                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10186                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10187                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10188                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10189         }
10190
10191         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10192                 ec->rx_coalesce_usecs_irq = 0;
10193                 ec->tx_coalesce_usecs_irq = 0;
10194                 ec->stats_block_coalesce_usecs = 0;
10195         }
10196 }
10197
10198 static int __devinit tg3_init_one(struct pci_dev *pdev,
10199                                   const struct pci_device_id *ent)
10200 {
10201         static int tg3_version_printed = 0;
10202         unsigned long tg3reg_base, tg3reg_len;
10203         struct net_device *dev;
10204         struct tg3 *tp;
10205         int i, err, pci_using_dac, pm_cap;
10206
10207         if (tg3_version_printed++ == 0)
10208                 printk(KERN_INFO "%s", version);
10209
10210         err = pci_enable_device(pdev);
10211         if (err) {
10212                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10213                        "aborting.\n");
10214                 return err;
10215         }
10216
10217         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10218                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10219                        "base address, aborting.\n");
10220                 err = -ENODEV;
10221                 goto err_out_disable_pdev;
10222         }
10223
10224         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10225         if (err) {
10226                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10227                        "aborting.\n");
10228                 goto err_out_disable_pdev;
10229         }
10230
10231         pci_set_master(pdev);
10232
10233         /* Find power-management capability. */
10234         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10235         if (pm_cap == 0) {
10236                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10237                        "aborting.\n");
10238                 err = -EIO;
10239                 goto err_out_free_res;
10240         }
10241
10242         /* Configure DMA attributes. */
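        /* Prefer a full 64-bit DMA mask (dual address cycles); if the
         * platform cannot provide that, fall back to 32-bit addressing.
         * The pci_using_dac result decides whether NETIF_F_HIGHDMA is
         * advertised further down.
         */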
10243         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10244         if (!err) {
10245                 pci_using_dac = 1;
10246                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10247                 if (err < 0) {
10248                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10249                                "for consistent allocations\n");
10250                         goto err_out_free_res;
10251                 }
10252         } else {
10253                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10254                 if (err) {
10255                         printk(KERN_ERR PFX "No usable DMA configuration, "
10256                                "aborting.\n");
10257                         goto err_out_free_res;
10258                 }
10259                 pci_using_dac = 0;
10260         }
10261
10262         tg3reg_base = pci_resource_start(pdev, 0);
10263         tg3reg_len = pci_resource_len(pdev, 0);
10264
10265         dev = alloc_etherdev(sizeof(*tp));
10266         if (!dev) {
10267                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10268                 err = -ENOMEM;
10269                 goto err_out_free_res;
10270         }
10271
10272         SET_MODULE_OWNER(dev);
10273         SET_NETDEV_DEV(dev, &pdev->dev);
10274
10275         if (pci_using_dac)
10276                 dev->features |= NETIF_F_HIGHDMA;
10277         dev->features |= NETIF_F_LLTX;
10278 #if TG3_VLAN_TAG_USED
10279         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10280         dev->vlan_rx_register = tg3_vlan_rx_register;
10281         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10282 #endif
10283
10284         tp = netdev_priv(dev);
10285         tp->pdev = pdev;
10286         tp->dev = dev;
10287         tp->pm_cap = pm_cap;
10288         tp->mac_mode = TG3_DEF_MAC_MODE;
10289         tp->rx_mode = TG3_DEF_RX_MODE;
10290         tp->tx_mode = TG3_DEF_TX_MODE;
10291         tp->mi_mode = MAC_MI_MODE_BASE;
10292         if (tg3_debug > 0)
10293                 tp->msg_enable = tg3_debug;
10294         else
10295                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10296
10297         /* The word/byte swap controls here control register access byte
10298          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10299          * setting below.
10300          */
10301         tp->misc_host_ctrl =
10302                 MISC_HOST_CTRL_MASK_PCI_INT |
10303                 MISC_HOST_CTRL_WORD_SWAP |
10304                 MISC_HOST_CTRL_INDIR_ACCESS |
10305                 MISC_HOST_CTRL_PCISTATE_RW;
10306
10307         /* The NONFRM (non-frame) byte/word swap controls take effect
10308          * on descriptor entries, anything which isn't packet data.
10309          *
10310          * The StrongARM chips on the board (one for tx, one for rx)
10311          * are running in big-endian mode.
10312          */
10313         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10314                         GRC_MODE_WSWAP_NONFRM_DATA);
10315 #ifdef __BIG_ENDIAN
10316         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10317 #endif
10318         spin_lock_init(&tp->lock);
10319         spin_lock_init(&tp->tx_lock);
10320         spin_lock_init(&tp->indirect_lock);
10321         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10322
10323         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10324         if (!tp->regs) {
10325                 printk(KERN_ERR PFX "Cannot map device registers, "
10326                        "aborting.\n");
10327                 err = -ENOMEM;
10328                 goto err_out_free_dev;
10329         }
10330
10331         tg3_init_link_config(tp);
10332
10333         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10334         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10335         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10336
10337         dev->open = tg3_open;
10338         dev->stop = tg3_close;
10339         dev->get_stats = tg3_get_stats;
10340         dev->set_multicast_list = tg3_set_rx_mode;
10341         dev->set_mac_address = tg3_set_mac_addr;
10342         dev->do_ioctl = tg3_ioctl;
10343         dev->tx_timeout = tg3_tx_timeout;
10344         dev->poll = tg3_poll;
10345         dev->ethtool_ops = &tg3_ethtool_ops;
10346         dev->weight = 64;
10347         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10348         dev->change_mtu = tg3_change_mtu;
10349         dev->irq = pdev->irq;
10350 #ifdef CONFIG_NET_POLL_CONTROLLER
10351         dev->poll_controller = tg3_poll_controller;
10352 #endif
10353
10354         err = tg3_get_invariants(tp);
10355         if (err) {
10356                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10357                        "aborting.\n");
10358                 goto err_out_iounmap;
10359         }
10360
10361         tg3_init_bufmgr_config(tp);
10362
10363 #if TG3_TSO_SUPPORT != 0
10364         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10365                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10366         }
10367         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10368             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10369             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10370             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10371                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10372         } else {
10373                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10374         }
10375
10376         /* TSO is off by default; the user can enable it using ethtool.  */
10377 #if 0
10378         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10379                 dev->features |= NETIF_F_TSO;
10380 #endif
10381
10382 #endif
10383
10384         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10385             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10386             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10387                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10388                 tp->rx_pending = 63;
10389         }
10390
10391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10392                 tp->pdev_peer = tg3_find_5704_peer(tp);
10393
10394         err = tg3_get_device_address(tp);
10395         if (err) {
10396                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10397                        "aborting.\n");
10398                 goto err_out_iounmap;
10399         }
10400
10401         /*
10402          * Reset the chip in case an UNDI or EFI driver did not shut it
10403          * down properly.  Otherwise the DMA self test will enable WDMAC
10404          * and we'll see (spurious) pending DMA on the PCI bus.
10405          */
10406         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10407             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10408                 pci_save_state(tp->pdev);
10409                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10410                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10411         }
10412
10413         err = tg3_test_dma(tp);
10414         if (err) {
10415                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10416                 goto err_out_iounmap;
10417         }
10418
10419         /* Tigon3 can do ipv4 only... and some chips have buggy
10420          * checksumming.
10421          */
10422         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10423                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10424                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10425         } else
10426                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10427
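              /* Do not advertise highmem DMA on 5788 parts; without
               * NETIF_F_HIGHDMA the core never hands the driver highmem buffers.
               */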
10428         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10429                 dev->features &= ~NETIF_F_HIGHDMA;
10430
10431         /* Flow control autonegotiation is the default behavior. */
10432         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10433
10434         tg3_init_coal(tp);
10435
10436         /* Now that the chip is fully set up, save away a snapshot of the
10437          * PCI config space.  We need to restore it after GRC_MISC_CFG core
10438          * clock resets and on some resume events.
10439          */
10440         pci_save_state(tp->pdev);
10441
10442         err = register_netdev(dev);
10443         if (err) {
10444                 printk(KERN_ERR PFX "Cannot register net device, "
10445                        "aborting.\n");
10446                 goto err_out_iounmap;
10447         }
10448
10449         pci_set_drvdata(pdev, dev);
10450
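              /* Log a one-line summary of the board: part number, chip
               * revision, PHY, bus type/speed/width, and link speeds, followed
               * by the MAC address.
               */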
10451         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10452                dev->name,
10453                tp->board_part_number,
10454                tp->pci_chip_rev_id,
10455                tg3_phy_string(tp),
10456                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10457                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10458                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10459                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10460                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10461                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10462
10463         for (i = 0; i < 6; i++)
10464                 printk("%2.2x%c", dev->dev_addr[i],
10465                        i == 5 ? '\n' : ':');
10466
10467         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10468                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10469                "TSOcap[%d]\n",
10470                dev->name,
10471                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10472                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10473                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10474                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10475                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10476                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10477                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10478         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10479                dev->name, tp->dma_rwctrl);
10480
10481         return 0;
10482
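      /* Error unwind: each label falls through to the ones below it,
       * releasing the resources acquired before the point of failure.
       */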
10483 err_out_iounmap:
10484         iounmap(tp->regs);
10485
10486 err_out_free_dev:
10487         free_netdev(dev);
10488
10489 err_out_free_res:
10490         pci_release_regions(pdev);
10491
10492 err_out_disable_pdev:
10493         pci_disable_device(pdev);
10494         pci_set_drvdata(pdev, NULL);
10495         return err;
10496 }
10497
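      /* Undo everything tg3_init_one() did, in the reverse order. */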
10498 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10499 {
10500         struct net_device *dev = pci_get_drvdata(pdev);
10501
10502         if (dev) {
10503                 struct tg3 *tp = netdev_priv(dev);
10504
10505                 unregister_netdev(dev);
10506                 iounmap(tp->regs);
10507                 free_netdev(dev);
10508                 pci_release_regions(pdev);
10509                 pci_disable_device(pdev);
10510                 pci_set_drvdata(pdev, NULL);
10511         }
10512 }
10513
10514 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10515 {
10516         struct net_device *dev = pci_get_drvdata(pdev);
10517         struct tg3 *tp = netdev_priv(dev);
10518         int err;
10519
10520         if (!netif_running(dev))
10521                 return 0;
10522
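              /* Quiesce the device: stop the queue and NAPI polling, kill the
               * timer, disable interrupts and detach, then halt the chip
               * before entering the requested power state.
               */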
10523         tg3_netif_stop(tp);
10524
10525         del_timer_sync(&tp->timer);
10526
10527         tg3_full_lock(tp, 1);
10528         tg3_disable_ints(tp);
10529         tg3_full_unlock(tp);
10530
10531         netif_device_detach(dev);
10532
10533         tg3_full_lock(tp, 0);
10534         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10535         tg3_full_unlock(tp);
10536
10537         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
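              /* If the power state change failed, bring the device all the
               * way back up so the interface is not left in a dead state.
               */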
10538         if (err) {
10539                 tg3_full_lock(tp, 0);
10540
10541                 tg3_init_hw(tp);
10542
10543                 tp->timer.expires = jiffies + tp->timer_offset;
10544                 add_timer(&tp->timer);
10545
10546                 netif_device_attach(dev);
10547                 tg3_netif_start(tp);
10548
10549                 tg3_full_unlock(tp);
10550         }
10551
10552         return err;
10553 }
10554
10555 static int tg3_resume(struct pci_dev *pdev)
10556 {
10557         struct net_device *dev = pci_get_drvdata(pdev);
10558         struct tg3 *tp = netdev_priv(dev);
10559         int err;
10560
10561         if (!netif_running(dev))
10562                 return 0;
10563
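              /* Restore the saved PCI config space, return the chip to full
               * power, then re-initialize the hardware and restart the timer.
               */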
10564         pci_restore_state(tp->pdev);
10565
10566         err = tg3_set_power_state(tp, 0);
10567         if (err)
10568                 return err;
10569
10570         netif_device_attach(dev);
10571
10572         tg3_full_lock(tp, 0);
10573
10574         tg3_init_hw(tp);
10575
10576         tp->timer.expires = jiffies + tp->timer_offset;
10577         add_timer(&tp->timer);
10578
10579         tg3_netif_start(tp);
10580
10581         tg3_full_unlock(tp);
10582
10583         return 0;
10584 }
10585
10586 static struct pci_driver tg3_driver = {
10587         .name           = DRV_MODULE_NAME,
10588         .id_table       = tg3_pci_tbl,
10589         .probe          = tg3_init_one,
10590         .remove         = __devexit_p(tg3_remove_one),
10591         .suspend        = tg3_suspend,
10592         .resume         = tg3_resume
10593 };
10594
10595 static int __init tg3_init(void)
10596 {
10597         return pci_module_init(&tg3_driver);
10598 }
10599
10600 static void __exit tg3_cleanup(void)
10601 {
10602         pci_unregister_driver(&tg3_driver);
10603 }
10604
10605 module_init(tg3_init);
10606 module_exit(tg3_cleanup);