/* tg3: Fix a flags typo
 * [safe/jmp/linux-2.6] / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.92"
68 #define DRV_MODULE_RELDATE      "May 2, 2008"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
/* Module identification plus the single "tg3_debug" parameter, a
 * bitmap of NETIF_MSG_* categories (-1 selects TG3_DEF_MSG_ENABLE).
 */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI IDs this driver binds to.  Terminated by the empty sentinel
 * entry; exported to userspace tooling via MODULE_DEVICE_TABLE.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* sentinel */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ETHTOOL_GSTRINGS names for ETHTOOL_GSTATS.  The count is tied to
 * struct tg3_ethtool_stats via TG3_NUM_STATS; NOTE(review): the entry
 * order presumably must match that struct's u64 layout -- verify
 * before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* ETHTOOL_GSTRINGS names for the TG3_NUM_TEST self-tests.
 * NOTE(review): order presumably must match the self-test result
 * array filled in by the test implementation -- verify before
 * reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
/* Write a 32-bit value to a direct-mapped (BAR-mapped) device register. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* Write a 32-bit value to an APE (Application Processing Engine) register. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a device register through the PCI config-space indirect window.
 * indirect_lock serializes use of the shared BASE_ADDR/DATA register
 * pair; irqsave because the window is also used from interrupt paths.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* Write a register, then read it back so the posted PCI write has
 * reached the chip by the time we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* flush the posted write */
}
345
/* Read a device register through the PCI config-space indirect window.
 * Serialized against other indirect accesses by indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Write a mailbox register indirectly via PCI config space.  Two
 * mailboxes (RX return ring consumer and standard ring producer) have
 * dedicated config-space aliases and bypass the window; all others go
 * through the indirect register window at off + 0x5600 (the GRC
 * mailbox base, cf. GRCMBOX_BASE).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register indirectly via PCI config space, through
 * the register window at off + 0x5600 (GRC mailbox base).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* flush the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
/* Write a mailbox register, then read it back to flush the posted
 * write -- except on chips flagged with the write-reorder or ICH
 * workarounds, where the read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* Write a TX mailbox directly.  Chips with the TXD mailbox hardware
 * bug get the value written twice; chips behind write-reordering
 * bridges get a read-back to flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* workaround: repeat the write */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush the posted write */
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
/* 5906 mailbox write: mailboxes live in the GRC mailbox area. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
/* Register access shorthands.  These dispatch through the per-chip
 * accessor function pointers and assume a local "tp" is in scope.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
463
/* Write a 32-bit word into NIC on-chip SRAM through the memory window.
 * On the 5906 the statistics-block address range is skipped entirely
 * (NOTE(review): inferred from the address check; confirm the 5906 has
 * no SRAM backing that range).
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window driven through PCI config space */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window driven through MMIO, with posted-write flushes */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a 32-bit word from NIC on-chip SRAM through the memory window.
 * The 5906 statistics-block range reads back as 0 (mirrors the skip
 * in tg3_write_mem).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window driven through PCI config space */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window driven through MMIO */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Disable chip interrupts: mask the PCI interrupt line and write 1 to
 * interrupt mailbox 0 (which stops interrupt generation; see the
 * matching special case in tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Kick the chip so any work already pending is serviced.  Without
 * tagged status, a pending status-block update is turned into an
 * interrupt via GRC SETINT; otherwise (tagged status, or nothing
 * pending) force an immediate coalescing pass instead.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: clear irq_sync, unmask the PCI interrupt
 * line, and write the last seen status tag to interrupt mailbox 0 to
 * acknowledge completed work.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();	/* irq_sync must be visible before interrupts are unmasked */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI chips get the mailbox written a second time.
	 * NOTE(review): rationale not visible here -- presumably a
	 * re-arm quirk of 1-shot MSI mode; confirm against chip docs.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Acknowledge the last processed status tag; no read-back flush. */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write w.r.t. other CPUs' MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the interface: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the netdev watchdog does not fire
 * a spurious tx_timeout while the queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Resume the interface: wake the TX queue, restart NAPI polling, and
 * re-enable interrupts (forcing one status-block pass via the
 * SD_STATUS_UPDATED bit so nothing pending is missed).
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the core clock back to its normal source/speed, preserving
 * the CLKRUN bits.  No-op on CPMU-equipped and 5780-class chips.
 * The intermediate writes follow a hardware-mandated sequence
 * (NOTE(review): step ordering taken on trust from the original code;
 * do not reorder without chip documentation).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Keep only CLKRUN control and the low speed bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: drop 44MHZ_CORE with ALTCLK held,
		 * then release 44MHZ_CORE while keeping ALTCLK.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Maximum busy-poll iterations for an MII management transaction. */
#define PHY_BUSY_LOOPS	5000

/* Read PHY register "reg" over the MII management interface into
 * *val.  Autopolling is temporarily disabled around the transaction
 * and restored afterwards.  Returns 0 on success, -EBUSY if the
 * transaction never completes (*val is left 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause hardware autopolling so it can't collide with us. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI communication frame: PHY address, register, read. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Busy-wait for the transaction to complete (up to ~50 ms). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* Re-read to pick up the final data bits. */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write "val" to PHY register "reg" over the MII management
 * interface, with the same autopolling pause/restore as tg3_readphy.
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently skipped (reported as success).  Returns 0 on success,
 * -EBUSY on transaction timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause hardware autopolling so it can't collide with us. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI communication frame: address, register, data, write. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Busy-wait for the transaction to complete (up to ~50 ms). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Write "val" to PHY DSP register "reg" via the address/data port pair. */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
812
/* Enable or disable automatic MDI crossover in the PHY.  Only applies
 * to 5705+ copper parts; serdes interfaces are skipped.  The 5906
 * uses a shadowed EPHY test register, everything else the AUX control
 * misc shadow register.  Failed PHY reads silently abort the update.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Expose the shadow register bank, flip the
			 * MDIX bit, then restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the AUX control misc shadow, read-modify-write
		 * the force-auto-MDIX bit, and commit with WREN set.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
850
/* Enable the "ethernet@wirespeed" feature (downshift to a lower link
 * speed on marginal cabling) unless the chip forbids it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* Read-modify-write an AUX_CTRL shadow register; 0x7007 and
	 * bits 15/4 are Broadcom magic values — presumably selecting
	 * and enabling wirespeed; confirm against PHY documentation.
	 */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
863
864 static int tg3_bmcr_reset(struct tg3 *tp)
865 {
866         u32 phy_control;
867         int limit, err;
868
869         /* OK, reset it, and poll the BMCR_RESET bit until it
870          * clears or we time out.
871          */
872         phy_control = BMCR_RESET;
873         err = tg3_writephy(tp, MII_BMCR, phy_control);
874         if (err != 0)
875                 return -EBUSY;
876
877         limit = 5000;
878         while (limit--) {
879                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880                 if (err != 0)
881                         return -EBUSY;
882
883                 if ((phy_control & BMCR_RESET) == 0) {
884                         udelay(40);
885                         break;
886                 }
887                 udelay(10);
888         }
889         if (limit <= 0)
890                 return -EBUSY;
891
892         return 0;
893 }
894
/* Program PHY DSP registers from the chip's one-time-programmable
 * (OTP) calibration word.  No-op when no OTP value was captured at
 * probe time (tp->phy_otp == 0).
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGCTGT field -> TAP1 (plus default AGC target bits). */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* HPFFLTR + HPFOVER fields -> channel 0 adjust register. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* LPFDIS field + ADC clock adjust -> channel 3 adjust register. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC field -> expansion register 0x75. */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BTAMP field -> expansion register 0x96. */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* ROFF + RCOFF fields -> expansion register 0x97. */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
937
938 static int tg3_wait_macro_done(struct tg3 *tp)
939 {
940         int limit = 100;
941
942         while (limit--) {
943                 u32 tmp32;
944
945                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946                         if ((tmp32 & 0x1000) == 0)
947                                 break;
948                 }
949         }
950         if (limit <= 0)
951                 return -EBUSY;
952
953         return 0;
954 }
955
/* Write a known test pattern into the DSP of each of the four PHY
 * channels, read it back, and verify it.  On a macro-engine timeout,
 * *resetp is set so the caller re-resets the PHY and retries; on a
 * data mismatch, -EBUSY is returned without requesting another reset.
 * Returns 0 when every channel verifies clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Point the DSP at this channel's block; register 0x16
		 * drives the DSP macro engine (values are Broadcom
		 * magic numbers).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the pattern and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-seek to the channel and start the read-back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern words come back as low/high pairs; only the
		 * masked bits are significant for comparison.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP back into a
				 * known state before bailing out.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1021
/* Clear the 6-word test pattern in each of the four DSP channel
 * blocks (undoes tg3_phy_write_and_check_testpat).  Returns 0 on
 * success or -EBUSY if the DSP macro engine times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		/* Start the write macro, push six zero words, commit. */
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1041
/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, then write-and-verify a DSP test pattern,
 * re-resetting and retrying (up to 10 times) on failure, and finally
 * restore the original register state.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if this read fails on every retry,
		 * phy9_orig is used uninitialized in the restore below
		 * — confirm the retry loop cannot exhaust that way.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Zero the test pattern before restoring normal operation. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and leave the DSP idle. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the saved 1000BASE-T control register. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1117
1118 static void tg3_link_report(struct tg3 *);
1119
/* Reset the tigon3 PHY and reapply all chip-specific workarounds:
 * report a lost carrier, run the 5703/4/5 DSP-verify reset where
 * required, apply OTP calibration, and restore jumbo-frame and FIFO
 * elasticity settings.  Returns 0 on success, negative errno on
 * failure.  (An old version took a FORCE argument; it no longer
 * exists — the reset is unconditional.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 ethernet PHY out of IDDQ power-down. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice — its link bit is latched-low, so the
	 * first read clears stale state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On non-AX 5784, temporarily lift the 10Mb-RX-only CPMU
	 * restriction around the reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Undo the 12.5MHz low-power MAC clock if it was set. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Each 0x0c00 / 0x0400 AUX_CTRL pair below brackets a set of
	 * DSP register pokes (enable SM_DSP clock, write, disable).
	 * The DSP values are Broadcom-supplied chip-bug workarounds.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1265
/* Drive the GPIOs that switch auxiliary (Vaux) power.  On dual-port
 * devices (5704/5714) the GPIOs are shared with the peer function, so
 * the peer's WOL/ASF needs are consulted and only one function runs
 * the GPIO sequence.  No-op on non-NIC (LOM) configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up if either function needs it for WOL/ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Defer to the peer if it already finished init. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Staged writes: program OEs/OUTPUT1-2, then
			 * raise OUTPUT0, then release OUTPUT2.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Nobody needs aux power: toggle GPIO1 to drop Vaux. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1361
1362 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1363 {
1364         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1365                 return 1;
1366         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1367                 if (speed != SPEED_10)
1368                         return 1;
1369         } else if (speed == SPEED_10)
1370                 return 1;
1371
1372         return 0;
1373 }
1374
1375 static int tg3_setup_phy(struct tg3 *, int);
1376
1377 #define RESET_KIND_SHUTDOWN     0
1378 #define RESET_KIND_INIT         1
1379 #define RESET_KIND_SUSPEND      2
1380
1381 static void tg3_write_sig_post_reset(struct tg3 *, int);
1382 static int tg3_halt_cpu(struct tg3 *, u32);
1383 static int tg3_nvram_lock(struct tg3 *);
1384 static void tg3_nvram_unlock(struct tg3 *);
1385
/* Quiesce and power down the PHY ahead of a low-power transition.
 * SerDes devices only park the SERDES block; some chips must skip the
 * final BMCR power-down because of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SERDES in HW-autoneg + soft reset. */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then drop it into IDDQ power-down. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the link LEDs off; 0x01b2 is a Broadcom magic
		 * AUX_CTRL value (presumably a low-power setting —
		 * confirm against PHY documentation).
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Switch the 1000MB MAC clock down to 12.5MHz. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1433
1434 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1435 {
1436         u32 misc_host_ctrl;
1437         u16 power_control, power_caps;
1438         int pm = tp->pm_cap;
1439
1440         /* Make sure register accesses (indirect or otherwise)
1441          * will function correctly.
1442          */
1443         pci_write_config_dword(tp->pdev,
1444                                TG3PCI_MISC_HOST_CTRL,
1445                                tp->misc_host_ctrl);
1446
1447         pci_read_config_word(tp->pdev,
1448                              pm + PCI_PM_CTRL,
1449                              &power_control);
1450         power_control |= PCI_PM_CTRL_PME_STATUS;
1451         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1452         switch (state) {
1453         case PCI_D0:
1454                 power_control |= 0;
1455                 pci_write_config_word(tp->pdev,
1456                                       pm + PCI_PM_CTRL,
1457                                       power_control);
1458                 udelay(100);    /* Delay after power state change */
1459
1460                 /* Switch out of Vaux if it is a NIC */
1461                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1462                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1463
1464                 return 0;
1465
1466         case PCI_D1:
1467                 power_control |= 1;
1468                 break;
1469
1470         case PCI_D2:
1471                 power_control |= 2;
1472                 break;
1473
1474         case PCI_D3hot:
1475                 power_control |= 3;
1476                 break;
1477
1478         default:
1479                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1480                        "requested.\n",
1481                        tp->dev->name, state);
1482                 return -EINVAL;
1483         };
1484
1485         power_control |= PCI_PM_CTRL_PME_ENABLE;
1486
1487         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1488         tw32(TG3PCI_MISC_HOST_CTRL,
1489              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1490
1491         if (tp->link_config.phy_is_low_power == 0) {
1492                 tp->link_config.phy_is_low_power = 1;
1493                 tp->link_config.orig_speed = tp->link_config.speed;
1494                 tp->link_config.orig_duplex = tp->link_config.duplex;
1495                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1496         }
1497
1498         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1499                 tp->link_config.speed = SPEED_10;
1500                 tp->link_config.duplex = DUPLEX_HALF;
1501                 tp->link_config.autoneg = AUTONEG_ENABLE;
1502                 tg3_setup_phy(tp, 0);
1503         }
1504
1505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1506                 u32 val;
1507
1508                 val = tr32(GRC_VCPU_EXT_CTRL);
1509                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1510         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1511                 int i;
1512                 u32 val;
1513
1514                 for (i = 0; i < 200; i++) {
1515                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1516                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1517                                 break;
1518                         msleep(1);
1519                 }
1520         }
1521         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1522                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1523                                                      WOL_DRV_STATE_SHUTDOWN |
1524                                                      WOL_DRV_WOL |
1525                                                      WOL_SET_MAGIC_PKT);
1526
1527         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1528
1529         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1530                 u32 mac_mode;
1531
1532                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1534                         udelay(40);
1535
1536                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1538                         else
1539                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1540
1541                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1542                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1543                             ASIC_REV_5700) {
1544                                 u32 speed = (tp->tg3_flags &
1545                                              TG3_FLAG_WOL_SPEED_100MB) ?
1546                                              SPEED_100 : SPEED_10;
1547                                 if (tg3_5700_link_polarity(tp, speed))
1548                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1549                                 else
1550                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1551                         }
1552                 } else {
1553                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1554                 }
1555
1556                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1557                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1558
1559                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1560                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1561                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1562
1563                 tw32_f(MAC_MODE, mac_mode);
1564                 udelay(100);
1565
1566                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1567                 udelay(10);
1568         }
1569
1570         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1571             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1572              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1573                 u32 base_val;
1574
1575                 base_val = tp->pci_clock_ctrl;
1576                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1577                              CLOCK_CTRL_TXCLK_DISABLE);
1578
1579                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1580                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1581         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1582                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1583                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1584                 /* do nothing */
1585         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1586                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1587                 u32 newbits1, newbits2;
1588
1589                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1590                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1591                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1592                                     CLOCK_CTRL_TXCLK_DISABLE |
1593                                     CLOCK_CTRL_ALTCLK);
1594                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1595                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1596                         newbits1 = CLOCK_CTRL_625_CORE;
1597                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1598                 } else {
1599                         newbits1 = CLOCK_CTRL_ALTCLK;
1600                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1601                 }
1602
1603                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1604                             40);
1605
1606                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1607                             40);
1608
1609                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1610                         u32 newbits3;
1611
1612                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1613                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1614                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1615                                             CLOCK_CTRL_TXCLK_DISABLE |
1616                                             CLOCK_CTRL_44MHZ_CORE);
1617                         } else {
1618                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1619                         }
1620
1621                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1622                                     tp->pci_clock_ctrl | newbits3, 40);
1623                 }
1624         }
1625
1626         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1627             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1628             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1629                 tg3_power_down_phy(tp);
1630
1631         tg3_frob_aux_power(tp);
1632
1633         /* Workaround for unstable PLL clock */
1634         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1635             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1636                 u32 val = tr32(0x7d00);
1637
1638                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1639                 tw32(0x7d00, val);
1640                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1641                         int err;
1642
1643                         err = tg3_nvram_lock(tp);
1644                         tg3_halt_cpu(tp, RX_CPU_BASE);
1645                         if (!err)
1646                                 tg3_nvram_unlock(tp);
1647                 }
1648         }
1649
1650         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1651
1652         /* Finally, set the new power state. */
1653         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1654         udelay(100);    /* Delay after power state change */
1655
1656         return 0;
1657 }
1658
/* tp->lock is held. */
/* Poll until the firmware has acknowledged the previously posted driver
 * event (GRC_RX_CPU_DRIVER_EVENT clears) or the retry budget runs out.
 *
 * NOTE(review): the comment below says 2.5 milliseconds, but 250000
 * iterations of udelay(10) is up to ~2.5 seconds — confirm which
 * timeout was intended before changing either the comment or the bound.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	/* Wait for up to 2.5 milliseconds */
	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}
1671
/* tp->lock is held. */
/* Push the current PHY/link registers to the management firmware via the
 * NIC SRAM firmware command mailbox.  Only applies to 5780-class parts
 * with ASF firmware enabled; silently returns otherwise.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure firmware has consumed any previously posted event. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Command length; value 14 per firmware protocol — TODO confirm
	 * against the firmware interface spec (four 32-bit words follow).
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half.  A failed
	 * PHY read leaves that half zero.
	 */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement / link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status; zero for serdes interfaces. */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: MII_PHYADDR register in the high half, low half zero. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	/* Ring the doorbell: tell the RX CPU a driver event is pending. */
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}
1721
1722 static void tg3_link_report(struct tg3 *tp)
1723 {
1724         if (!netif_carrier_ok(tp->dev)) {
1725                 if (netif_msg_link(tp))
1726                         printk(KERN_INFO PFX "%s: Link is down.\n",
1727                                tp->dev->name);
1728                 tg3_ump_link_report(tp);
1729         } else if (netif_msg_link(tp)) {
1730                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1731                        tp->dev->name,
1732                        (tp->link_config.active_speed == SPEED_1000 ?
1733                         1000 :
1734                         (tp->link_config.active_speed == SPEED_100 ?
1735                          100 : 10)),
1736                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1737                         "full" : "half"));
1738
1739                 printk(KERN_INFO PFX
1740                        "%s: Flow control is %s for TX and %s for RX.\n",
1741                        tp->dev->name,
1742                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1743                        "on" : "off",
1744                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1745                        "on" : "off");
1746                 tg3_ump_link_report(tp);
1747         }
1748 }
1749
1750 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1751 {
1752         u16 miireg;
1753
1754         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1755                 miireg = ADVERTISE_PAUSE_CAP;
1756         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1757                 miireg = ADVERTISE_PAUSE_ASYM;
1758         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1759                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1760         else
1761                 miireg = 0;
1762
1763         return miireg;
1764 }
1765
1766 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1767 {
1768         u16 miireg;
1769
1770         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1771                 miireg = ADVERTISE_1000XPAUSE;
1772         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1773                 miireg = ADVERTISE_1000XPSE_ASYM;
1774         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1775                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1776         else
1777                 miireg = 0;
1778
1779         return miireg;
1780 }
1781
1782 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1783 {
1784         u8 cap = 0;
1785
1786         if (lcladv & ADVERTISE_PAUSE_CAP) {
1787                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1788                         if (rmtadv & LPA_PAUSE_CAP)
1789                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1790                         else if (rmtadv & LPA_PAUSE_ASYM)
1791                                 cap = TG3_FLOW_CTRL_RX;
1792                 } else {
1793                         if (rmtadv & LPA_PAUSE_CAP)
1794                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1795                 }
1796         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1797                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1798                         cap = TG3_FLOW_CTRL_TX;
1799         }
1800
1801         return cap;
1802 }
1803
1804 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1805 {
1806         u8 cap = 0;
1807
1808         if (lcladv & ADVERTISE_1000XPAUSE) {
1809                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1810                         if (rmtadv & LPA_1000XPAUSE)
1811                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1812                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1813                                 cap = TG3_FLOW_CTRL_RX;
1814                 } else {
1815                         if (rmtadv & LPA_1000XPAUSE)
1816                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1817                 }
1818         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1819                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1820                         cap = TG3_FLOW_CTRL_TX;
1821         }
1822
1823         return cap;
1824 }
1825
1826 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1827 {
1828         u8 new_tg3_flags = 0;
1829         u32 old_rx_mode = tp->rx_mode;
1830         u32 old_tx_mode = tp->tx_mode;
1831
1832         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1833             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1834                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1835                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1836                                                                    remote_adv);
1837                 else
1838                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1839                                                                    remote_adv);
1840         } else {
1841                 new_tg3_flags = tp->link_config.flowctrl;
1842         }
1843
1844         tp->link_config.active_flowctrl = new_tg3_flags;
1845
1846         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1847                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1848         else
1849                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1850
1851         if (old_rx_mode != tp->rx_mode) {
1852                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1853         }
1854
1855         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1856                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1857         else
1858                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1859
1860         if (old_tx_mode != tp->tx_mode) {
1861                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1862         }
1863 }
1864
1865 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1866 {
1867         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1868         case MII_TG3_AUX_STAT_10HALF:
1869                 *speed = SPEED_10;
1870                 *duplex = DUPLEX_HALF;
1871                 break;
1872
1873         case MII_TG3_AUX_STAT_10FULL:
1874                 *speed = SPEED_10;
1875                 *duplex = DUPLEX_FULL;
1876                 break;
1877
1878         case MII_TG3_AUX_STAT_100HALF:
1879                 *speed = SPEED_100;
1880                 *duplex = DUPLEX_HALF;
1881                 break;
1882
1883         case MII_TG3_AUX_STAT_100FULL:
1884                 *speed = SPEED_100;
1885                 *duplex = DUPLEX_FULL;
1886                 break;
1887
1888         case MII_TG3_AUX_STAT_1000HALF:
1889                 *speed = SPEED_1000;
1890                 *duplex = DUPLEX_HALF;
1891                 break;
1892
1893         case MII_TG3_AUX_STAT_1000FULL:
1894                 *speed = SPEED_1000;
1895                 *duplex = DUPLEX_FULL;
1896                 break;
1897
1898         default:
1899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1900                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1901                                  SPEED_10;
1902                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1903                                   DUPLEX_HALF;
1904                         break;
1905                 }
1906                 *speed = SPEED_INVALID;
1907                 *duplex = DUPLEX_INVALID;
1908                 break;
1909         };
1910 }
1911
/* Program the copper PHY advertisement/control registers according to
 * tp->link_config, then either force the configured speed/duplex or
 * (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100baseT advertised only when WOL requires it. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything
		 * permitted by tp->link_config.advertising.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 revs are forced into master mode unless
			 * the device is restricted to 10/100 operation.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement in forced 10/100 mode. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait up to ~15 ms for
			 * the link to drop before applying the new BMCR.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* Double BMSR read: status bits are latched. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off a fresh negotiation cycle. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2049
2050 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2051 {
2052         int err;
2053
2054         /* Turn off tap power management. */
2055         /* Set Extended packet length bit */
2056         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2057
2058         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2059         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2060
2061         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2062         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2063
2064         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2065         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2066
2067         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2068         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2069
2070         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2071         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2072
2073         udelay(40);
2074
2075         return err;
2076 }
2077
2078 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2079 {
2080         u32 adv_reg, all_mask = 0;
2081
2082         if (mask & ADVERTISED_10baseT_Half)
2083                 all_mask |= ADVERTISE_10HALF;
2084         if (mask & ADVERTISED_10baseT_Full)
2085                 all_mask |= ADVERTISE_10FULL;
2086         if (mask & ADVERTISED_100baseT_Half)
2087                 all_mask |= ADVERTISE_100HALF;
2088         if (mask & ADVERTISED_100baseT_Full)
2089                 all_mask |= ADVERTISE_100FULL;
2090
2091         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2092                 return 0;
2093
2094         if ((adv_reg & all_mask) != all_mask)
2095                 return 0;
2096         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2097                 u32 tg3_ctrl;
2098
2099                 all_mask = 0;
2100                 if (mask & ADVERTISED_1000baseT_Half)
2101                         all_mask |= ADVERTISE_1000HALF;
2102                 if (mask & ADVERTISED_1000baseT_Full)
2103                         all_mask |= ADVERTISE_1000FULL;
2104
2105                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2106                         return 0;
2107
2108                 if ((tg3_ctrl & all_mask) != all_mask)
2109                         return 0;
2110         }
2111         return 1;
2112 }
2113
2114 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2115 {
2116         u32 curadv, reqadv;
2117
2118         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2119                 return 1;
2120
2121         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2122         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2123
2124         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2125                 if (curadv != reqadv)
2126                         return 0;
2127
2128                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2129                         tg3_readphy(tp, MII_LPA, rmtadv);
2130         } else {
2131                 /* Reprogram the advertisement register, even if it
2132                  * does not affect the current link.  If the link
2133                  * gets renegotiated in the future, we can save an
2134                  * additional renegotiation cycle by advertising
2135                  * it correctly in the first place.
2136                  */
2137                 if (curadv != reqadv) {
2138                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2139                                      ADVERTISE_PAUSE_ASYM);
2140                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2141                 }
2142         }
2143
2144         return 1;
2145 }
2146
2147 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2148 {
2149         int current_link_up;
2150         u32 bmsr, dummy;
2151         u32 lcl_adv, rmt_adv;
2152         u16 current_speed;
2153         u8 current_duplex;
2154         int i, err;
2155
2156         tw32(MAC_EVENT, 0);
2157
2158         tw32_f(MAC_STATUS,
2159              (MAC_STATUS_SYNC_CHANGED |
2160               MAC_STATUS_CFG_CHANGED |
2161               MAC_STATUS_MI_COMPLETION |
2162               MAC_STATUS_LNKSTATE_CHANGED));
2163         udelay(40);
2164
2165         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2166                 tw32_f(MAC_MI_MODE,
2167                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2168                 udelay(80);
2169         }
2170
2171         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2172
2173         /* Some third-party PHYs need to be reset on link going
2174          * down.
2175          */
2176         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2177              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2178              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2179             netif_carrier_ok(tp->dev)) {
2180                 tg3_readphy(tp, MII_BMSR, &bmsr);
2181                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2182                     !(bmsr & BMSR_LSTATUS))
2183                         force_reset = 1;
2184         }
2185         if (force_reset)
2186                 tg3_phy_reset(tp);
2187
2188         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2189                 tg3_readphy(tp, MII_BMSR, &bmsr);
2190                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2191                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2192                         bmsr = 0;
2193
2194                 if (!(bmsr & BMSR_LSTATUS)) {
2195                         err = tg3_init_5401phy_dsp(tp);
2196                         if (err)
2197                                 return err;
2198
2199                         tg3_readphy(tp, MII_BMSR, &bmsr);
2200                         for (i = 0; i < 1000; i++) {
2201                                 udelay(10);
2202                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2203                                     (bmsr & BMSR_LSTATUS)) {
2204                                         udelay(40);
2205                                         break;
2206                                 }
2207                         }
2208
2209                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2210                             !(bmsr & BMSR_LSTATUS) &&
2211                             tp->link_config.active_speed == SPEED_1000) {
2212                                 err = tg3_phy_reset(tp);
2213                                 if (!err)
2214                                         err = tg3_init_5401phy_dsp(tp);
2215                                 if (err)
2216                                         return err;
2217                         }
2218                 }
2219         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2220                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2221                 /* 5701 {A0,B0} CRC bug workaround */
2222                 tg3_writephy(tp, 0x15, 0x0a75);
2223                 tg3_writephy(tp, 0x1c, 0x8c68);
2224                 tg3_writephy(tp, 0x1c, 0x8d68);
2225                 tg3_writephy(tp, 0x1c, 0x8c68);
2226         }
2227
2228         /* Clear pending interrupts... */
2229         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2230         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2231
2232         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2233                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2234         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2235                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2236
2237         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2238             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2239                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2240                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2241                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2242                 else
2243                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2244         }
2245
2246         current_link_up = 0;
2247         current_speed = SPEED_INVALID;
2248         current_duplex = DUPLEX_INVALID;
2249
2250         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2251                 u32 val;
2252
2253                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2254                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2255                 if (!(val & (1 << 10))) {
2256                         val |= (1 << 10);
2257                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2258                         goto relink;
2259                 }
2260         }
2261
2262         bmsr = 0;
2263         for (i = 0; i < 100; i++) {
2264                 tg3_readphy(tp, MII_BMSR, &bmsr);
2265                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2266                     (bmsr & BMSR_LSTATUS))
2267                         break;
2268                 udelay(40);
2269         }
2270
2271         if (bmsr & BMSR_LSTATUS) {
2272                 u32 aux_stat, bmcr;
2273
2274                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2275                 for (i = 0; i < 2000; i++) {
2276                         udelay(10);
2277                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2278                             aux_stat)
2279                                 break;
2280                 }
2281
2282                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2283                                              &current_speed,
2284                                              &current_duplex);
2285
2286                 bmcr = 0;
2287                 for (i = 0; i < 200; i++) {
2288                         tg3_readphy(tp, MII_BMCR, &bmcr);
2289                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2290                                 continue;
2291                         if (bmcr && bmcr != 0x7fff)
2292                                 break;
2293                         udelay(10);
2294                 }
2295
2296                 lcl_adv = 0;
2297                 rmt_adv = 0;
2298
2299                 tp->link_config.active_speed = current_speed;
2300                 tp->link_config.active_duplex = current_duplex;
2301
2302                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2303                         if ((bmcr & BMCR_ANENABLE) &&
2304                             tg3_copper_is_advertising_all(tp,
2305                                                 tp->link_config.advertising)) {
2306                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2307                                                                   &rmt_adv))
2308                                         current_link_up = 1;
2309                         }
2310                 } else {
2311                         if (!(bmcr & BMCR_ANENABLE) &&
2312                             tp->link_config.speed == current_speed &&
2313                             tp->link_config.duplex == current_duplex &&
2314                             tp->link_config.flowctrl ==
2315                             tp->link_config.active_flowctrl) {
2316                                 current_link_up = 1;
2317                         }
2318                 }
2319
2320                 if (current_link_up == 1 &&
2321                     tp->link_config.active_duplex == DUPLEX_FULL)
2322                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2323         }
2324
2325 relink:
2326         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2327                 u32 tmp;
2328
2329                 tg3_phy_copper_begin(tp);
2330
2331                 tg3_readphy(tp, MII_BMSR, &tmp);
2332                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2333                     (tmp & BMSR_LSTATUS))
2334                         current_link_up = 1;
2335         }
2336
2337         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2338         if (current_link_up == 1) {
2339                 if (tp->link_config.active_speed == SPEED_100 ||
2340                     tp->link_config.active_speed == SPEED_10)
2341                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2342                 else
2343                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2344         } else
2345                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2346
2347         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2348         if (tp->link_config.active_duplex == DUPLEX_HALF)
2349                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2350
2351         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2352                 if (current_link_up == 1 &&
2353                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2354                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2355                 else
2356                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2357         }
2358
2359         /* ??? Without this setting Netgear GA302T PHY does not
2360          * ??? send/receive packets...
2361          */
2362         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2363             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2364                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2365                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2366                 udelay(80);
2367         }
2368
2369         tw32_f(MAC_MODE, tp->mac_mode);
2370         udelay(40);
2371
2372         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2373                 /* Polled via timer. */
2374                 tw32_f(MAC_EVENT, 0);
2375         } else {
2376                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2377         }
2378         udelay(40);
2379
2380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2381             current_link_up == 1 &&
2382             tp->link_config.active_speed == SPEED_1000 &&
2383             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2384              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2385                 udelay(120);
2386                 tw32_f(MAC_STATUS,
2387                      (MAC_STATUS_SYNC_CHANGED |
2388                       MAC_STATUS_CFG_CHANGED));
2389                 udelay(40);
2390                 tg3_write_mem(tp,
2391                               NIC_SRAM_FIRMWARE_MBOX,
2392                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2393         }
2394
2395         if (current_link_up != netif_carrier_ok(tp->dev)) {
2396                 if (current_link_up)
2397                         netif_carrier_on(tp->dev);
2398                 else
2399                         netif_carrier_off(tp->dev);
2400                 tg3_link_report(tp);
2401         }
2402
2403         return 0;
2404 }
2405
/* Software state for the driver-run 1000BASE-X autonegotiation
 * arbitration state machine (see tg3_fiber_aneg_smachine()).  One
 * instance lives on the stack for the duration of fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* of the arbitration machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14      /* unimplemented */
#define ANEG_STATE_NEXT_PAGE_WAIT       15      /* unimplemented */

        u32 flags;      /* MR_* control/status bits; MR_LP_ADV_* mirror
                         * what the link partner advertised */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (one tick per smachine call). */
        unsigned long link_time, cur_time;

        /* Last config word sampled from MAC_RX_AUTO_NEG, and how many
         * consecutive times the same value has been seen. */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Boolean match flags derived from the received config word. */
        char ability_match, idle_match, ack_match;

        /* Raw tx/rx autoneg config words; ANEG_CFG_* are their bits. */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0       /* step completed; keep going */
#define ANEG_DONE       1       /* negotiation finished */
#define ANEG_TIMER_ENAB 2       /* caller should continue polling */
#define ANEG_FAILED     (-1)    /* parenthesized: safe in any expression */

/* Settle time, in state-machine ticks, before advancing past a restart
 * or waiting period. */
#define ANEG_STATE_SETTLE_TIME  10000
2469
2470 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2471                                    struct tg3_fiber_aneginfo *ap)
2472 {
2473         u16 flowctrl;
2474         unsigned long delta;
2475         u32 rx_cfg_reg;
2476         int ret;
2477
2478         if (ap->state == ANEG_STATE_UNKNOWN) {
2479                 ap->rxconfig = 0;
2480                 ap->link_time = 0;
2481                 ap->cur_time = 0;
2482                 ap->ability_match_cfg = 0;
2483                 ap->ability_match_count = 0;
2484                 ap->ability_match = 0;
2485                 ap->idle_match = 0;
2486                 ap->ack_match = 0;
2487         }
2488         ap->cur_time++;
2489
2490         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2491                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2492
2493                 if (rx_cfg_reg != ap->ability_match_cfg) {
2494                         ap->ability_match_cfg = rx_cfg_reg;
2495                         ap->ability_match = 0;
2496                         ap->ability_match_count = 0;
2497                 } else {
2498                         if (++ap->ability_match_count > 1) {
2499                                 ap->ability_match = 1;
2500                                 ap->ability_match_cfg = rx_cfg_reg;
2501                         }
2502                 }
2503                 if (rx_cfg_reg & ANEG_CFG_ACK)
2504                         ap->ack_match = 1;
2505                 else
2506                         ap->ack_match = 0;
2507
2508                 ap->idle_match = 0;
2509         } else {
2510                 ap->idle_match = 1;
2511                 ap->ability_match_cfg = 0;
2512                 ap->ability_match_count = 0;
2513                 ap->ability_match = 0;
2514                 ap->ack_match = 0;
2515
2516                 rx_cfg_reg = 0;
2517         }
2518
2519         ap->rxconfig = rx_cfg_reg;
2520         ret = ANEG_OK;
2521
2522         switch(ap->state) {
2523         case ANEG_STATE_UNKNOWN:
2524                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2525                         ap->state = ANEG_STATE_AN_ENABLE;
2526
2527                 /* fallthru */
2528         case ANEG_STATE_AN_ENABLE:
2529                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2530                 if (ap->flags & MR_AN_ENABLE) {
2531                         ap->link_time = 0;
2532                         ap->cur_time = 0;
2533                         ap->ability_match_cfg = 0;
2534                         ap->ability_match_count = 0;
2535                         ap->ability_match = 0;
2536                         ap->idle_match = 0;
2537                         ap->ack_match = 0;
2538
2539                         ap->state = ANEG_STATE_RESTART_INIT;
2540                 } else {
2541                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2542                 }
2543                 break;
2544
2545         case ANEG_STATE_RESTART_INIT:
2546                 ap->link_time = ap->cur_time;
2547                 ap->flags &= ~(MR_NP_LOADED);
2548                 ap->txconfig = 0;
2549                 tw32(MAC_TX_AUTO_NEG, 0);
2550                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2551                 tw32_f(MAC_MODE, tp->mac_mode);
2552                 udelay(40);
2553
2554                 ret = ANEG_TIMER_ENAB;
2555                 ap->state = ANEG_STATE_RESTART;
2556
2557                 /* fallthru */
2558         case ANEG_STATE_RESTART:
2559                 delta = ap->cur_time - ap->link_time;
2560                 if (delta > ANEG_STATE_SETTLE_TIME) {
2561                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2562                 } else {
2563                         ret = ANEG_TIMER_ENAB;
2564                 }
2565                 break;
2566
2567         case ANEG_STATE_DISABLE_LINK_OK:
2568                 ret = ANEG_DONE;
2569                 break;
2570
2571         case ANEG_STATE_ABILITY_DETECT_INIT:
2572                 ap->flags &= ~(MR_TOGGLE_TX);
2573                 ap->txconfig = ANEG_CFG_FD;
2574                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2575                 if (flowctrl & ADVERTISE_1000XPAUSE)
2576                         ap->txconfig |= ANEG_CFG_PS1;
2577                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2578                         ap->txconfig |= ANEG_CFG_PS2;
2579                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2580                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2581                 tw32_f(MAC_MODE, tp->mac_mode);
2582                 udelay(40);
2583
2584                 ap->state = ANEG_STATE_ABILITY_DETECT;
2585                 break;
2586
2587         case ANEG_STATE_ABILITY_DETECT:
2588                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2589                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2590                 }
2591                 break;
2592
2593         case ANEG_STATE_ACK_DETECT_INIT:
2594                 ap->txconfig |= ANEG_CFG_ACK;
2595                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2596                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2597                 tw32_f(MAC_MODE, tp->mac_mode);
2598                 udelay(40);
2599
2600                 ap->state = ANEG_STATE_ACK_DETECT;
2601
2602                 /* fallthru */
2603         case ANEG_STATE_ACK_DETECT:
2604                 if (ap->ack_match != 0) {
2605                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2606                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2607                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2608                         } else {
2609                                 ap->state = ANEG_STATE_AN_ENABLE;
2610                         }
2611                 } else if (ap->ability_match != 0 &&
2612                            ap->rxconfig == 0) {
2613                         ap->state = ANEG_STATE_AN_ENABLE;
2614                 }
2615                 break;
2616
2617         case ANEG_STATE_COMPLETE_ACK_INIT:
2618                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2619                         ret = ANEG_FAILED;
2620                         break;
2621                 }
2622                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2623                                MR_LP_ADV_HALF_DUPLEX |
2624                                MR_LP_ADV_SYM_PAUSE |
2625                                MR_LP_ADV_ASYM_PAUSE |
2626                                MR_LP_ADV_REMOTE_FAULT1 |
2627                                MR_LP_ADV_REMOTE_FAULT2 |
2628                                MR_LP_ADV_NEXT_PAGE |
2629                                MR_TOGGLE_RX |
2630                                MR_NP_RX);
2631                 if (ap->rxconfig & ANEG_CFG_FD)
2632                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2633                 if (ap->rxconfig & ANEG_CFG_HD)
2634                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2635                 if (ap->rxconfig & ANEG_CFG_PS1)
2636                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2637                 if (ap->rxconfig & ANEG_CFG_PS2)
2638                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2639                 if (ap->rxconfig & ANEG_CFG_RF1)
2640                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2641                 if (ap->rxconfig & ANEG_CFG_RF2)
2642                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2643                 if (ap->rxconfig & ANEG_CFG_NP)
2644                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2645
2646                 ap->link_time = ap->cur_time;
2647
2648                 ap->flags ^= (MR_TOGGLE_TX);
2649                 if (ap->rxconfig & 0x0008)
2650                         ap->flags |= MR_TOGGLE_RX;
2651                 if (ap->rxconfig & ANEG_CFG_NP)
2652                         ap->flags |= MR_NP_RX;
2653                 ap->flags |= MR_PAGE_RX;
2654
2655                 ap->state = ANEG_STATE_COMPLETE_ACK;
2656                 ret = ANEG_TIMER_ENAB;
2657                 break;
2658
2659         case ANEG_STATE_COMPLETE_ACK:
2660                 if (ap->ability_match != 0 &&
2661                     ap->rxconfig == 0) {
2662                         ap->state = ANEG_STATE_AN_ENABLE;
2663                         break;
2664                 }
2665                 delta = ap->cur_time - ap->link_time;
2666                 if (delta > ANEG_STATE_SETTLE_TIME) {
2667                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2668                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2669                         } else {
2670                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2671                                     !(ap->flags & MR_NP_RX)) {
2672                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2673                                 } else {
2674                                         ret = ANEG_FAILED;
2675                                 }
2676                         }
2677                 }
2678                 break;
2679
2680         case ANEG_STATE_IDLE_DETECT_INIT:
2681                 ap->link_time = ap->cur_time;
2682                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2683                 tw32_f(MAC_MODE, tp->mac_mode);
2684                 udelay(40);
2685
2686                 ap->state = ANEG_STATE_IDLE_DETECT;
2687                 ret = ANEG_TIMER_ENAB;
2688                 break;
2689
2690         case ANEG_STATE_IDLE_DETECT:
2691                 if (ap->ability_match != 0 &&
2692                     ap->rxconfig == 0) {
2693                         ap->state = ANEG_STATE_AN_ENABLE;
2694                         break;
2695                 }
2696                 delta = ap->cur_time - ap->link_time;
2697                 if (delta > ANEG_STATE_SETTLE_TIME) {
2698                         /* XXX another gem from the Broadcom driver :( */
2699                         ap->state = ANEG_STATE_LINK_OK;
2700                 }
2701                 break;
2702
2703         case ANEG_STATE_LINK_OK:
2704                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2705                 ret = ANEG_DONE;
2706                 break;
2707
2708         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2709                 /* ??? unimplemented */
2710                 break;
2711
2712         case ANEG_STATE_NEXT_PAGE_WAIT:
2713                 /* ??? unimplemented */
2714                 break;
2715
2716         default:
2717                 ret = ANEG_FAILED;
2718                 break;
2719         };
2720
2721         return ret;
2722 }
2723
2724 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2725 {
2726         int res = 0;
2727         struct tg3_fiber_aneginfo aninfo;
2728         int status = ANEG_FAILED;
2729         unsigned int tick;
2730         u32 tmp;
2731
2732         tw32_f(MAC_TX_AUTO_NEG, 0);
2733
2734         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2735         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2736         udelay(40);
2737
2738         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2739         udelay(40);
2740
2741         memset(&aninfo, 0, sizeof(aninfo));
2742         aninfo.flags |= MR_AN_ENABLE;
2743         aninfo.state = ANEG_STATE_UNKNOWN;
2744         aninfo.cur_time = 0;
2745         tick = 0;
2746         while (++tick < 195000) {
2747                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2748                 if (status == ANEG_DONE || status == ANEG_FAILED)
2749                         break;
2750
2751                 udelay(1);
2752         }
2753
2754         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2755         tw32_f(MAC_MODE, tp->mac_mode);
2756         udelay(40);
2757
2758         *txflags = aninfo.txconfig;
2759         *rxflags = aninfo.flags;
2760
2761         if (status == ANEG_DONE &&
2762             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2763                              MR_LP_ADV_FULL_DUPLEX)))
2764                 res = 1;
2765
2766         return res;
2767 }
2768
/* One-time initialization sequence for the BCM8002 fiber PHY: reset it
 * and program its undocumented configuration registers via raw MDIO
 * writes.  The register numbers/values below are magic from Broadcom;
 * the write order matters — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2818
/* Configure link on fiber parts whose SerDes autoneg is done by the
 * hardware SG_DIG block (e.g. 5704S).  Programs SG_DIG_CTRL to match
 * the requested autoneg/flow-control settings, handles the pre-A2 5704
 * SERDES_CFG workaround, and falls back to parallel detection when the
 * partner does not autonegotiate.  Returns 1 if link is up, else 0.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* SERDES_CFG workaround applies to all revs except 5704 A0/A1. */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: turn hardware autoneg off if it was on;
                 * link is up whenever the PCS is synced. */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* port A and port B use different magic
                                 * pre-emphasis values */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* While in parallel-detect mode with PCS sync and no
                 * incoming config words, keep the link up and let the
                 * serdes counter run down before reprogramming. */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                /* (Re)start hardware autoneg: soft-reset the SG_DIG
                 * block, then program the wanted control value. */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg completed: resolve flow control from
                         * our advertisement and the partner's. */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: try parallel
                                 * detection with autoneg disabled. */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: rearm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
2960
/* Configure link on fiber parts without the hardware SG_DIG autoneg
 * block: run the software autoneg state machine (fiber_autoneg) when
 * autoneg is enabled, otherwise force a 1000-full link.  Returns 1 if
 * link is up, else 0.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* No PCS sync means no usable link either way. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Our pause advertisement came from the config
                         * word we transmitted; the partner's from the
                         * MR_LP_ADV_* bits we collected. */
                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack sync/config change events until they stop. */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed, but PCS is synced and the partner is
                 * not sending config words: treat link as up
                 * (parallel detection). */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
3022
/* Top-level link setup for TBI/fiber ports: puts the MAC in TBI mode,
 * initializes a BCM8002 PHY if present, then delegates to the hardware
 * (SG_DIG) or software autoneg path.  Updates carrier state, LED
 * control and reports link changes.  Always returns 0.
 *
 * NOTE(review): force_reset is unused in this function.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Snapshot current link parameters so we can report a change
         * even when the carrier state itself does not flip. */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: software-autoneg part with carrier up and a clean
         * status (synced + signal, no pending config) — just ack the
         * change bits and keep the existing link. */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Switch the MAC port into TBI (fiber) mode. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the stale link-change bit in the status block. */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack sync/config change events until the hardware settles. */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* Pulse SEND_CONFIGS to nudge the partner when autoneg
                 * has already timed out. */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Record the resulting link parameters and drive the LEDs. */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Propagate carrier changes; also report when link stayed up
         * but speed/duplex/flow-control changed. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
3130
/* Link setup for an MII-attached SERDES interface (invoked from
 * tg3_setup_phy() when TG3_FLG2_MII_SERDES is set).  Programs the PHY
 * for 1000BASE-X autoneg or forced mode, derives the resulting link
 * state and flow control, and updates MAC mode and carrier state.
 * Returns the OR of all tg3_readphy()/tg3_writephy() results
 * (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any pending link-state attention bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* The BMSR link bit is latched-low; read twice so the second
	 * read reflects the current link state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: derive link up/down from the MAC TX status
		 * register instead of the PHY's BMSR bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement word from the
		 * requested flow control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and return; serdes_counter
			 * gives the poller time to see it complete.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: 1000 Mb/s with the requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the link drops before the new mode
				 * is forced below.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched link bit again: double read. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		/* Link up always reports gigabit on this interface. */
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local
			 * and link-partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			/* Clear parallel-detect state on link loss. */
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3300
/* Poll-driven helper for SERDES parallel detection.  Once the autoneg
 * grace period (tp->serdes_counter) has expired:
 *  - with no carrier and autoneg enabled, force the link up when the
 *    PHY sees signal but no config code words (parallel detection);
 *  - with carrier up via parallel detection, drop back to autoneg as
 *    soon as config code words are received again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* NOTE(review): read twice — presumably these
			 * status bits are latched like BMSR; confirm
			 * against the PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3358
/* Top-level link (re)configuration.  Dispatches to the fiber, MII
 * SERDES or copper setup routine, then applies link-state-dependent
 * MAC fixups:
 *  - 5784 A0/A1: GRC clock prescaler scaled to the active MAC clock;
 *  - TX inter-packet gap / slot time (extended slot time for 1000/half);
 *  - statistics-block coalescing ticks on pre-5705 parts;
 *  - PCIe L1 power-management threshold (ASPM workaround).
 * Returns the error code from the variant-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Pick a GRC prescaler matching the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half gets an extended slot time (0xff vs the normal 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Stats coalescing runs only while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: use the configured L1 threshold; link up:
		 * max out the threshold field — presumably to restrict
		 * L1 entry while traffic can flow (TODO confirm against
		 * the ASPM workaround notes).
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3421
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set, or the
	 * indirect mailbox write method in use), this path should be
	 * unreachable.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Only flag the pending recovery here; the chip reset itself
	 * happens later, from the workqueue (see comment above).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3442
3443 static inline u32 tg3_tx_avail(struct tg3 *tp)
3444 {
3445         smp_mb();
3446         return (tp->tx_pending -
3447                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3448 }
3449
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Reclaim descriptors from tp->tx_cons up to the hardware's
	 * reported consumer index, unmapping and freeing each skb.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb means ring bookkeeping is out of sync with
		 * the hardware index -- trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the skb's linear data... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* ...followed by one descriptor per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Occupied slot or running past hw_idx mid-skb
			 * is another inconsistency; recover below after
			 * the unmaps/free are done.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock to close the race with a
		 * concurrent tg3_start_xmit() stopping the queue.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3517
3518 /* Returns size of skb allocated or < 0 on error.
3519  *
3520  * We only need to fill in the address because the other members
3521  * of the RX descriptor are invariant, see tg3_init_rings.
3522  *
3523  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3524  * posting buffers we only dirty the first cache line of the RX
3525  * descriptor (containing the address).  Whereas for the RX status
3526  * buffers the cpu only reads the last cacheline of the RX descriptor
3527  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3528  */
3529 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3530                             int src_idx, u32 dest_idx_unmasked)
3531 {
3532         struct tg3_rx_buffer_desc *desc;
3533         struct ring_info *map, *src_map;
3534         struct sk_buff *skb;
3535         dma_addr_t mapping;
3536         int skb_size, dest_idx;
3537
3538         src_map = NULL;
3539         switch (opaque_key) {
3540         case RXD_OPAQUE_RING_STD:
3541                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3542                 desc = &tp->rx_std[dest_idx];
3543                 map = &tp->rx_std_buffers[dest_idx];
3544                 if (src_idx >= 0)
3545                         src_map = &tp->rx_std_buffers[src_idx];
3546                 skb_size = tp->rx_pkt_buf_sz;
3547                 break;
3548
3549         case RXD_OPAQUE_RING_JUMBO:
3550                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3551                 desc = &tp->rx_jumbo[dest_idx];
3552                 map = &tp->rx_jumbo_buffers[dest_idx];
3553                 if (src_idx >= 0)
3554                         src_map = &tp->rx_jumbo_buffers[src_idx];
3555                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3556                 break;
3557
3558         default:
3559                 return -EINVAL;
3560         };
3561
3562         /* Do not overwrite any of the map or rp information
3563          * until we are sure we can commit to a new buffer.
3564          *
3565          * Callers depend upon this behavior and assume that
3566          * we leave everything unchanged if we fail.
3567          */
3568         skb = netdev_alloc_skb(tp->dev, skb_size);
3569         if (skb == NULL)
3570                 return -ENOMEM;
3571
3572         skb_reserve(skb, tp->rx_offset);
3573
3574         mapping = pci_map_single(tp->pdev, skb->data,
3575                                  skb_size - tp->rx_offset,
3576                                  PCI_DMA_FROMDEVICE);
3577
3578         map->skb = skb;
3579         pci_unmap_addr_set(map, mapping, mapping);
3580
3581         if (src_map != NULL)
3582                 src_map->skb = NULL;
3583
3584         desc->addr_hi = ((u64)mapping >> 32);
3585         desc->addr_lo = ((u64)mapping & 0xffffffff);
3586
3587         return skb_size;
3588 }
3589
3590 /* We only need to move over in the address because the other
3591  * members of the RX descriptor are invariant.  See notes above
3592  * tg3_alloc_rx_skb for full details.
3593  */
3594 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3595                            int src_idx, u32 dest_idx_unmasked)
3596 {
3597         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3598         struct ring_info *src_map, *dest_map;
3599         int dest_idx;
3600
3601         switch (opaque_key) {
3602         case RXD_OPAQUE_RING_STD:
3603                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3604                 dest_desc = &tp->rx_std[dest_idx];
3605                 dest_map = &tp->rx_std_buffers[dest_idx];
3606                 src_desc = &tp->rx_std[src_idx];
3607                 src_map = &tp->rx_std_buffers[src_idx];
3608                 break;
3609
3610         case RXD_OPAQUE_RING_JUMBO:
3611                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3612                 dest_desc = &tp->rx_jumbo[dest_idx];
3613                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3614                 src_desc = &tp->rx_jumbo[src_idx];
3615                 src_map = &tp->rx_jumbo_buffers[src_idx];
3616                 break;
3617
3618         default:
3619                 return;
3620         };
3621
3622         dest_map->skb = src_map->skb;
3623         pci_unmap_addr_set(dest_map, mapping,
3624                            pci_unmap_addr(src_map, mapping));
3625         dest_desc->addr_hi = src_desc->addr_hi;
3626         dest_desc->addr_lo = src_desc->addr_lo;
3627
3628         src_map->skb = NULL;
3629 }
3630
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3637
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the producer ring the
		 * buffer came from and its index within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring key: skip the entry entirely. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a fresh replacement buffer
			 * and hand the filled one up to the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a new skb and keep the
			 * original buffer on the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* 2-byte offset aligns the IP header on a
			 * 4-byte boundary (NET_IP_ALIGN idiom).
			 */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when enabled and the
		 * chip reports a valid TCP/UDP checksum (0xffff).
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			/* Flush accumulated std-ring postings to the
			 * chip once the max-post threshold is reached,
			 * and drop the deferred refill below.
			 */
			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3817
/* One NAPI work pass: handle link-change events (unless link state is
 * tracked via the link-change register or serdes polling), reap TX
 * completions, then process up to (budget - work_done) RX packets.
 * Returns the updated work_done; returns early if TX reclaim flagged
 * a pending recovery.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit in the status block
			 * before reprogramming the PHY.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3851
/* NAPI poll callback.  Loops on tg3_poll_work() until the budget is
 * exhausted or no work remains, then completes NAPI and re-enables
 * chip interrupts.  On TX recovery, completes NAPI and schedules a
 * full reset instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3892
/* Mark interrupt processing as quiesced and wait for any in-flight
 * handler to finish.  ISRs check tg3_irq_sync() and bail out once
 * irq_sync is set; smp_mb() orders the store before the wait.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Quiesce must not be nested. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3902
/* Nonzero while tg3_irq_quiesce() has interrupt processing shut off. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3907
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 * Paired with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3919
/* Release the lock taken by tg3_full_lock().  Does not undo an IRQ
 * quiesce; tp->irq_sync is reset elsewhere (e.g. tg3_restart_hw()).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3924
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Don't schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3941
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip NAPI scheduling if the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3966
/* INTx handler for chips using untagged status blocks.  The IRQ line
 * may be shared, so the PCI state register is consulted to decide
 * whether the interrupt is really ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Mark the status block consumed before checking for work. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4015
/* INTx/MSI handler for chips using tagged status blocks.  New work is
 * detected by comparing the hardware status tag against the last tag
 * the driver acknowledged.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4063
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt when the status block was updated or the
	 * chip reports the interrupt line active; disable further
	 * interrupts so the test fires exactly once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4078
4079 static int tg3_init_hw(struct tg3 *, int);
4080 static int tg3_halt(struct tg3 *, int, int);
4081
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Re-init failed: shut the chip down and close the netdev.
		 * dev_close() runs without tp->lock, hence the unlock /
		 * relock dance (matches __releases/__acquires above).
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4105
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: invoke the INTx handler directly so netconsole
 * and similar facilities can make progress without real interrupts.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4114
/* Workqueue handler that halts and re-initializes the chip.  Scheduled
 * from tg3_tx_timeout() and from tg3_poll()'s TX-recovery path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* irq_sync == 1: also wait out any in-flight interrupt handler. */
	tg3_full_lock(tp, 1);

	/* Remember whether a timer restart was requested, then clear the
	 * request bit before re-initializing.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery: switch to flushing mailbox write methods
		 * before restarting the chip.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
4155
/* Log a minimal set of MAC and DMA status registers to aid TX-timeout
 * debugging.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4163
/* net_device watchdog callback: optionally log hardware state, then
 * schedule a full chip reset in process context via tg3_reset_task().
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
4176
4177 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4178 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4179 {
4180         u32 base = (u32) mapping & 0xffffffff;
4181
4182         return ((base > 0xffffdcc0) &&
4183                 (base + len + 8 < base));
4184 }
4185
4186 /* Test for DMA addresses > 40-bit */
4187 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4188                                           int len)
4189 {
4190 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4191         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4192                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4193         return 0;
4194 #else
4195         return 0;
4196 #endif
4197 }
4198
4199 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4200
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	/* Linearize the skb into a freshly allocated buffer; on 5701
	 * additionally pad headroom to realign the data to 4 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Queue the whole copy as a single descriptor. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First entry takes ownership of the replacement
			 * skb (NULL if the workaround failed above).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* Original skb is no longer needed regardless of outcome. */
	dev_kfree_skb(skb);

	return ret;
}
4268
4269 static void tg3_set_txd(struct tg3 *tp, int entry,
4270                         dma_addr_t mapping, int len, u32 flags,
4271                         u32 mss_and_is_end)
4272 {
4273         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4274         int is_end = (mss_and_is_end & 0x1);
4275         u32 mss = (mss_and_is_end >> 1);
4276         u32 vlan_tag = 0;
4277
4278         if (is_end)
4279                 flags |= TXD_FLAG_END;
4280         if (flags & TXD_FLAG_VLAN) {
4281                 vlan_tag = flags >> 16;
4282                 flags &= 0xffff;
4283         }
4284         vlan_tag |= (mss << TXD_MSS_SHIFT);
4285
4286         txd->addr_hi = ((u64) mapping >> 32);
4287         txd->addr_lo = ((u64) mapping & 0xffffffff);
4288         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4289         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4290 }
4291
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* We modify IP/TCP headers for TSO below, so a private
		 * copy is needed if the header area is cloned.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			/* Header length is carried in the bits above the
			 * MSS value (shifted by 9).
			 */
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with TX
		 * reclaim freeing descriptors concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4410
4411 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4412
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping the queue; still no room means
		 * the stack must requeue this skb.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off, then transmit each
	 * resulting skb individually.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original (unsegmented) skb is no longer needed. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4445
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* We modify IP/TCP headers for TSO below, so a private
		 * copy is needed if the header area is cloned.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		hdr_len = ip_tcp_len + tcp_opt_len;
		/* Headers over 80 bytes can trigger the TSO bug on
		 * affected chips; fall back to GSO segmentation.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO path: zero the TCP checksum and drop the
			 * generic csum offload flag.
			 */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO path: seed the TCP checksum with
			 * the pseudo-header checksum.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* IP/TCP option lengths are encoded differently per chip:
		 * in the mss field (<< 11) for HW TSO and 5705, in
		 * base_flags (<< 12) otherwise.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Track whether any mapping lands in a buggy DMA range; if so
	 * the whole packet is redone via the copy workaround below.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to this packet's first descriptor. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with TX
		 * reclaim freeing descriptors concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4621
4622 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4623                                int new_mtu)
4624 {
4625         dev->mtu = new_mtu;
4626
4627         if (new_mtu > ETH_DATA_LEN) {
4628                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4629                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4630                         ethtool_op_set_tso(dev, 0);
4631                 }
4632                 else
4633                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4634         } else {
4635                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4636                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4637                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4638         }
4639 }
4640
4641 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4642 {
4643         struct tg3 *tp = netdev_priv(dev);
4644         int err;
4645
4646         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4647                 return -EINVAL;
4648
4649         if (!netif_running(dev)) {
4650                 /* We'll just catch it later when the
4651                  * device is up'd.
4652                  */
4653                 tg3_set_mtu(dev, tp, new_mtu);
4654                 return 0;
4655         }
4656
4657         tg3_netif_stop(tp);
4658
4659         tg3_full_lock(tp, 1);
4660
4661         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4662
4663         tg3_set_mtu(dev, tp, new_mtu);
4664
4665         err = tg3_restart_hw(tp, 0);
4666
4667         if (!err)
4668                 tg3_netif_start(tp);
4669
4670         tg3_full_unlock(tp);
4671
4672         return err;
4673 }
4674
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: undo the streaming DMA mapping of every
	 * posted buffer and release its skb.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same as above, but with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each skb occupies one descriptor for its linear head
	 * plus one per page fragment, so the index is advanced manually.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* First descriptor maps the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Following descriptors (which may wrap around the ring)
		 * map the page fragments.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4746
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one standard RX buffer
 * (or, with the jumbo ring enabled, one jumbo buffer) could be posted.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips carry jumbo frames in oversized standard
	 * buffers rather than a separate jumbo ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			/* Partial allocation is tolerated: shrink the
			 * ring to the number of buffers actually posted,
			 * failing hard only if we got none at all.
			 */
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4836
4837 /*
4838  * Must not be invoked with interrupt sources disabled and
4839  * the hardware shutdown down.
4840  */
4841 static void tg3_free_consistent(struct tg3 *tp)
4842 {
4843         kfree(tp->rx_std_buffers);
4844         tp->rx_std_buffers = NULL;
4845         if (tp->rx_std) {
4846                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4847                                     tp->rx_std, tp->rx_std_mapping);
4848                 tp->rx_std = NULL;
4849         }
4850         if (tp->rx_jumbo) {
4851                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4852                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4853                 tp->rx_jumbo = NULL;
4854         }
4855         if (tp->rx_rcb) {
4856                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4857                                     tp->rx_rcb, tp->rx_rcb_mapping);
4858                 tp->rx_rcb = NULL;
4859         }
4860         if (tp->tx_ring) {
4861                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4862                         tp->tx_ring, tp->tx_desc_mapping);
4863                 tp->tx_ring = NULL;
4864         }
4865         if (tp->hw_status) {
4866                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4867                                     tp->hw_status, tp->status_mapping);
4868                 tp->hw_status = NULL;
4869         }
4870         if (tp->hw_stats) {
4871                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4872                                     tp->hw_stats, tp->stats_mapping);
4873                 tp->hw_stats = NULL;
4874         }
4875 }
4876
4877 /*
4878  * Must not be invoked with interrupt sources disabled and
4879  * the hardware shutdown down.  Can sleep.
4880  */
4881 static int tg3_alloc_consistent(struct tg3 *tp)
4882 {
4883         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4884                                       (TG3_RX_RING_SIZE +
4885                                        TG3_RX_JUMBO_RING_SIZE)) +
4886                                      (sizeof(struct tx_ring_info) *
4887                                       TG3_TX_RING_SIZE),
4888                                      GFP_KERNEL);
4889         if (!tp->rx_std_buffers)
4890                 return -ENOMEM;
4891
4892         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4893         tp->tx_buffers = (struct tx_ring_info *)
4894                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4895
4896         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4897                                           &tp->rx_std_mapping);
4898         if (!tp->rx_std)
4899                 goto err_out;
4900
4901         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4902                                             &tp->rx_jumbo_mapping);
4903
4904         if (!tp->rx_jumbo)
4905                 goto err_out;
4906
4907         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4908                                           &tp->rx_rcb_mapping);
4909         if (!tp->rx_rcb)
4910                 goto err_out;
4911
4912         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4913                                            &tp->tx_desc_mapping);
4914         if (!tp->tx_ring)
4915                 goto err_out;
4916
4917         tp->hw_status = pci_alloc_consistent(tp->pdev,
4918                                              TG3_HW_STATUS_SIZE,
4919                                              &tp->status_mapping);
4920         if (!tp->hw_status)
4921                 goto err_out;
4922
4923         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4924                                             sizeof(struct tg3_hw_stats),
4925                                             &tp->stats_mapping);
4926         if (!tp->hw_stats)
4927                 goto err_out;
4928
4929         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4930         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4931
4932         return 0;
4933
4934 err_out:
4935         tg3_free_consistent(tp);
4936         return -ENOMEM;
4937 }
4938
4939 #define MAX_WAIT_CNT 1000
4940
4941 /* To stop a block, clear the enable bit and poll till it
4942  * clears.  tp->lock is held.
4943  */
4944 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4945 {
4946         unsigned int i;
4947         u32 val;
4948
4949         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4950                 switch (ofs) {
4951                 case RCVLSC_MODE:
4952                 case DMAC_MODE:
4953                 case MBFREE_MODE:
4954                 case BUFMGR_MODE:
4955                 case MEMARB_MODE:
4956                         /* We can't enable/disable these bits of the
4957                          * 5705/5750, just say success.
4958                          */
4959                         return 0;
4960
4961                 default:
4962                         break;
4963                 };
4964         }
4965
4966         val = tr32(ofs);
4967         val &= ~enable_bit;
4968         tw32_f(ofs, val);
4969
4970         for (i = 0; i < MAX_WAIT_CNT; i++) {
4971                 udelay(100);
4972                 val = tr32(ofs);
4973                 if ((val & enable_bit) == 0)
4974                         break;
4975         }
4976
4977         if (i == MAX_WAIT_CNT && !silent) {
4978                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4979                        "ofs=%lx enable_bit=%x\n",
4980                        ofs, enable_bit);
4981                 return -ENODEV;
4982         }
4983
4984         return 0;
4985 }
4986
/* tp->lock is held.
 * Shut down the MAC and all DMA/offload blocks in dependency order,
 * accumulating any per-block stop failures into the return value.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting frames from the wire before tearing down the
	 * receive pipeline behind it.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Halt the receive-side blocks; errors are OR'd together so
	 * every block still gets its stop attempt.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Halt the transmit-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait up to 100ms for the
	 * enable bit to clear.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the status and statistics blocks so no stale data is
	 * seen after the restart.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5049
5050 /* tp->lock is held. */
5051 static int tg3_nvram_lock(struct tg3 *tp)
5052 {
5053         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5054                 int i;
5055
5056                 if (tp->nvram_lock_cnt == 0) {
5057                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5058                         for (i = 0; i < 8000; i++) {
5059                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5060                                         break;
5061                                 udelay(20);
5062                         }
5063                         if (i == 8000) {
5064                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5065                                 return -ENODEV;
5066                         }
5067                 }
5068                 tp->nvram_lock_cnt++;
5069         }
5070         return 0;
5071 }
5072
5073 /* tp->lock is held. */
5074 static void tg3_nvram_unlock(struct tg3 *tp)
5075 {
5076         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5077                 if (tp->nvram_lock_cnt > 0)
5078                         tp->nvram_lock_cnt--;
5079                 if (tp->nvram_lock_cnt == 0)
5080                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5081         }
5082 }
5083
5084 /* tp->lock is held. */
5085 static void tg3_enable_nvram_access(struct tg3 *tp)
5086 {
5087         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5088             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5089                 u32 nvaccess = tr32(NVRAM_ACCESS);
5090
5091                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5092         }
5093 }
5094
5095 /* tp->lock is held. */
5096 static void tg3_disable_nvram_access(struct tg3 *tp)
5097 {
5098         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5099             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5100                 u32 nvaccess = tr32(NVRAM_ACCESS);
5101
5102                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5103         }
5104 }
5105
/* Post an event to the APE management firmware, if present and ready.
 * The event word is stored into the shared event-status register under
 * the APE memory lock, and the APE is then signalled to process it.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless both the APE signature and the firmware
	 * ready status are in place.
	 */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Only queue our event once the previous one is done. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Signal the APE only if our event was actually queued above. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5141
5142 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5143 {
5144         u32 event;
5145         u32 apedata;
5146
5147         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5148                 return;
5149
5150         switch (kind) {
5151                 case RESET_KIND_INIT:
5152                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5153                                         APE_HOST_SEG_SIG_MAGIC);
5154                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5155                                         APE_HOST_SEG_LEN_MAGIC);
5156                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5157                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5158                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5159                                         APE_HOST_DRIVER_ID_MAGIC);
5160                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5161                                         APE_HOST_BEHAV_NO_PHYLOCK);
5162
5163                         event = APE_EVENT_STATUS_STATE_START;
5164                         break;
5165                 case RESET_KIND_SHUTDOWN:
5166                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5167                         break;
5168                 case RESET_KIND_SUSPEND:
5169                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5170                         break;
5171                 default:
5172                         return;
5173         }
5174
5175         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5176
5177         tg3_ape_send_event(tp, event);
5178 }
5179
5180 /* tp->lock is held. */
5181 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5182 {
5183         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5184                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5185
5186         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5187                 switch (kind) {
5188                 case RESET_KIND_INIT:
5189                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5190                                       DRV_STATE_START);
5191                         break;
5192
5193                 case RESET_KIND_SHUTDOWN:
5194                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5195                                       DRV_STATE_UNLOAD);
5196                         break;
5197
5198                 case RESET_KIND_SUSPEND:
5199                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5200                                       DRV_STATE_SUSPEND);
5201                         break;
5202
5203                 default:
5204                         break;
5205                 };
5206         }
5207
5208         if (kind == RESET_KIND_INIT ||
5209             kind == RESET_KIND_SUSPEND)
5210                 tg3_ape_driver_state_change(tp, kind);
5211 }
5212
5213 /* tp->lock is held. */
5214 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5215 {
5216         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5217                 switch (kind) {
5218                 case RESET_KIND_INIT:
5219                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5220                                       DRV_STATE_START_DONE);
5221                         break;
5222
5223                 case RESET_KIND_SHUTDOWN:
5224                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5225                                       DRV_STATE_UNLOAD_DONE);
5226                         break;
5227
5228                 default:
5229                         break;
5230                 };
5231         }
5232
5233         if (kind == RESET_KIND_SHUTDOWN)
5234                 tg3_ape_driver_state_change(tp, kind);
5235 }
5236
5237 /* tp->lock is held. */
5238 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5239 {
5240         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5241                 switch (kind) {
5242                 case RESET_KIND_INIT:
5243                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5244                                       DRV_STATE_START);
5245                         break;
5246
5247                 case RESET_KIND_SHUTDOWN:
5248                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5249                                       DRV_STATE_UNLOAD);
5250                         break;
5251
5252                 case RESET_KIND_SUSPEND:
5253                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5254                                       DRV_STATE_SUSPEND);
5255                         break;
5256
5257                 default:
5258                         break;
5259                 };
5260         }
5261 }
5262
/* Wait for the chip firmware (if any) to finish its post-reset
 * initialization.  Returns 0 on success or when no firmware is
 * fitted; -ENODEV only when a 5906 fails to report init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  Firmware
	 * signals completion by writing back the one's complement of
	 * the magic value placed in the mailbox before the reset.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5301
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear PCI_COMMAND bits such as
	 * memory enable (see tg3_chip_reset), so keep a copy for
	 * tg3_restore_pci_state() to put back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5307
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get their max read-request size restored; legacy
	 * PCI parts get their cacheline size and latency timer back.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5369
5370 static void tg3_stop_fw(struct tg3 *);
5371
5372 /* tp->lock is held. */
5373 static int tg3_chip_reset(struct tg3 *tp)
5374 {
5375         u32 val;
5376         void (*write_op)(struct tg3 *, u32, u32);
5377         int err;
5378
5379         tg3_nvram_lock(tp);
5380
5381         /* No matching tg3_nvram_unlock() after this because
5382          * chip reset below will undo the nvram lock.
5383          */
5384         tp->nvram_lock_cnt = 0;
5385
5386         /* GRC_MISC_CFG core clock reset will clear the memory
5387          * enable bit in PCI register 4 and the MSI enable bit
5388          * on some chips, so we save relevant registers here.
5389          */
5390         tg3_save_pci_state(tp);
5391
5392         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5393             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5394             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5395             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5396             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5397                 tw32(GRC_FASTBOOT_PC, 0);
5398
5399         /*
5400          * We must avoid the readl() that normally takes place.
5401          * It locks machines, causes machine checks, and other
5402          * fun things.  So, temporarily disable the 5701
5403          * hardware workaround, while we do the reset.
5404          */
5405         write_op = tp->write32;
5406         if (write_op == tg3_write_flush_reg32)
5407                 tp->write32 = tg3_write32;
5408
5409         /* Prevent the irq handler from reading or writing PCI registers
5410          * during chip reset when the memory enable bit in the PCI command
5411          * register may be cleared.  The chip does not generate interrupt
5412          * at this time, but the irq handler may still be called due to irq
5413          * sharing or irqpoll.
5414          */
5415         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5416         if (tp->hw_status) {
5417                 tp->hw_status->status = 0;
5418                 tp->hw_status->status_tag = 0;
5419         }
5420         tp->last_tag = 0;
5421         smp_mb();
5422         synchronize_irq(tp->pdev->irq);
5423
5424         /* do the reset */
5425         val = GRC_MISC_CFG_CORECLK_RESET;
5426
5427         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5428                 if (tr32(0x7e2c) == 0x60) {
5429                         tw32(0x7e2c, 0x20);
5430                 }
5431                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5432                         tw32(GRC_MISC_CFG, (1 << 29));
5433                         val |= (1 << 29);
5434                 }
5435         }
5436
5437         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5438                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5439                 tw32(GRC_VCPU_EXT_CTRL,
5440                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5441         }
5442
5443         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5444                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5445         tw32(GRC_MISC_CFG, val);
5446
5447         /* restore 5701 hardware bug workaround write method */
5448         tp->write32 = write_op;
5449
5450         /* Unfortunately, we have to delay before the PCI read back.
5451          * Some 575X chips even will not respond to a PCI cfg access
5452          * when the reset command is given to the chip.
5453          *
5454          * How do these hardware designers expect things to work
5455          * properly if the PCI write is posted for a long period
5456          * of time?  It is always necessary to have some method by
5457          * which a register read back can occur to push the write
5458          * out which does the reset.
5459          *
5460          * For most tg3 variants the trick below was working.
5461          * Ho hum...
5462          */
5463         udelay(120);
5464
5465         /* Flush PCI posted writes.  The normal MMIO registers
5466          * are inaccessible at this time so this is the only
5467          * way to make this reliably (actually, this is no longer
5468          * the case, see above).  I tried to use indirect
5469          * register read/write but this upset some 5701 variants.
5470          */
5471         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5472
5473         udelay(120);
5474
5475         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5476                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5477                         int i;
5478                         u32 cfg_val;
5479
5480                         /* Wait for link training to complete.  */
5481                         for (i = 0; i < 5000; i++)
5482                                 udelay(100);
5483
5484                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5485                         pci_write_config_dword(tp->pdev, 0xc4,
5486                                                cfg_val | (1 << 15));
5487                 }
5488                 /* Set PCIE max payload size and clear error status.  */
5489                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5490         }
5491
5492         tg3_restore_pci_state(tp);
5493
5494         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5495
5496         val = 0;
5497         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5498                 val = tr32(MEMARB_MODE);
5499         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5500
5501         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5502                 tg3_stop_fw(tp);
5503                 tw32(0x5000, 0x400);
5504         }
5505
5506         tw32(GRC_MODE, tp->grc_mode);
5507
5508         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5509                 val = tr32(0xc4);
5510
5511                 tw32(0xc4, val | (1 << 15));
5512         }
5513
5514         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5515             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5516                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5517                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5518                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5519                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5520         }
5521
5522         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5523                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5524                 tw32_f(MAC_MODE, tp->mac_mode);
5525         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5526                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5527                 tw32_f(MAC_MODE, tp->mac_mode);
5528         } else
5529                 tw32_f(MAC_MODE, 0);
5530         udelay(40);
5531
5532         err = tg3_poll_fw(tp);
5533         if (err)
5534                 return err;
5535
5536         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5537             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5538                 val = tr32(0x7c00);
5539
5540                 tw32(0x7c00, val | (1 << 25));
5541         }
5542
5543         /* Reprobe ASF enable state.  */
5544         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5545         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5546         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5547         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5548                 u32 nic_cfg;
5549
5550                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5551                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5552                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5553                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5554                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5555                 }
5556         }
5557
5558         return 0;
5559 }
5560
/* tp->lock is held. */
/* Ask the ASF management firmware to pause itself (FWCMD_NICDRV_PAUSE_FW).
 *
 * Only acts when ASF firmware is enabled and the APE is not in use.
 * The command is posted in the NIC SRAM firmware mailbox and signalled
 * to the RX CPU via the GRC_RX_CPU_DRIVER_EVENT doorbell bit.  The
 * wait before posting ensures we do not overwrite an event the firmware
 * has not yet consumed; the wait after ensures the pause request was
 * acknowledged before the caller proceeds (typically into a reset).
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;

		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		/* Post the pause command in the firmware mailbox... */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* ...then ring the driver-event doorbell to notify the RX CPU. */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= GRC_RX_CPU_DRIVER_EVENT;
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
5580
/* tp->lock is held. */
/* Fully quiesce the device: pause firmware, write the pre-reset
 * signature, abort the hardware, reset the chip, then write the
 * legacy and post-reset signatures.  Returns the chip-reset result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	/* The signatures are written even when the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5601
5602 #define TG3_FW_RELEASE_MAJOR    0x0
5603 #define TG3_FW_RELASE_MINOR     0x0
5604 #define TG3_FW_RELEASE_FIX      0x0
5605 #define TG3_FW_START_ADDR       0x08000000
5606 #define TG3_FW_TEXT_ADDR        0x08000000
5607 #define TG3_FW_TEXT_LEN         0x9c0
5608 #define TG3_FW_RODATA_ADDR      0x080009c0
5609 #define TG3_FW_RODATA_LEN       0x60
5610 #define TG3_FW_DATA_ADDR        0x08000a40
5611 #define TG3_FW_DATA_LEN         0x20
5612 #define TG3_FW_SBSS_ADDR        0x08000a60
5613 #define TG3_FW_SBSS_LEN         0xc
5614 #define TG3_FW_BSS_ADDR         0x08000a70
5615 #define TG3_FW_BSS_LEN          0x10
5616
/* Raw instruction words of the 5701 A0 bug-workaround firmware's .text
 * section.  Written into CPU scratch memory by tg3_load_firmware_cpu()
 * via tg3_load_5701_a0_firmware_fix(); the runs of zeros are padding
 * within the image.  Do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5710
/* .rodata section of the 5701 A0 workaround firmware.  The words pack
 * short ASCII tags (e.g. 0x35373031 is "5701") -- presumably message
 * strings used by the firmware itself.  Loaded alongside tg3FwText by
 * tg3_load_5701_a0_firmware_fix().
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5718
#if 0 /* All zeros, don't eat up space with it. */
/* The firmware's .data section is entirely zero, so it is compiled out;
 * tg3_load_5701_a0_firmware_fix() passes data_data == NULL and the
 * loader zero-fills the section instead.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5725
5726 #define RX_CPU_SCRATCH_BASE     0x30000
5727 #define RX_CPU_SCRATCH_SIZE     0x04000
5728 #define TX_CPU_SCRATCH_BASE     0x34000
5729 #define TX_CPU_SCRATCH_SIZE     0x04000
5730
5731 /* tp->lock is held. */
5732 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5733 {
5734         int i;
5735
5736         BUG_ON(offset == TX_CPU_BASE &&
5737             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5738
5739         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5740                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5741
5742                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5743                 return 0;
5744         }
5745         if (offset == RX_CPU_BASE) {
5746                 for (i = 0; i < 10000; i++) {
5747                         tw32(offset + CPU_STATE, 0xffffffff);
5748                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5749                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5750                                 break;
5751                 }
5752
5753                 tw32(offset + CPU_STATE, 0xffffffff);
5754                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5755                 udelay(10);
5756         } else {
5757                 for (i = 0; i < 10000; i++) {
5758                         tw32(offset + CPU_STATE, 0xffffffff);
5759                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5760                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5761                                 break;
5762                 }
5763         }
5764
5765         if (i >= 10000) {
5766                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5767                        "and %s CPU\n",
5768                        tp->dev->name,
5769                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5770                 return -ENODEV;
5771         }
5772
5773         /* Clear firmware's nvram arbitration. */
5774         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5775                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5776         return 0;
5777 }
5778
/* Describes one firmware image as three sections (.text, .rodata,
 * .data).  Each *_base is the section's firmware address; the loader
 * uses only its low 16 bits as the offset within CPU scratch memory.
 * A NULL *_data pointer means the section is written as zeros.
 */
struct fw_info {
	unsigned int text_base;		/* .text address (low 16 bits = scratch offset) */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;
	unsigned int rodata_base;	/* .rodata address (low 16 bits = scratch offset) */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;
	unsigned int data_base;		/* .data address (low 16 bits = scratch offset) */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* NULL => zero-fill the section */
};
5790
5791 /* tp->lock is held. */
5792 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5793                                  int cpu_scratch_size, struct fw_info *info)
5794 {
5795         int err, lock_err, i;
5796         void (*write_op)(struct tg3 *, u32, u32);
5797
5798         if (cpu_base == TX_CPU_BASE &&
5799             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5800                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5801                        "TX cpu firmware on %s which is 5705.\n",
5802                        tp->dev->name);
5803                 return -EINVAL;
5804         }
5805
5806         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5807                 write_op = tg3_write_mem;
5808         else
5809                 write_op = tg3_write_indirect_reg32;
5810
5811         /* It is possible that bootcode is still loading at this point.
5812          * Get the nvram lock first before halting the cpu.
5813          */
5814         lock_err = tg3_nvram_lock(tp);
5815         err = tg3_halt_cpu(tp, cpu_base);
5816         if (!lock_err)
5817                 tg3_nvram_unlock(tp);
5818         if (err)
5819                 goto out;
5820
5821         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5822                 write_op(tp, cpu_scratch_base + i, 0);
5823         tw32(cpu_base + CPU_STATE, 0xffffffff);
5824         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5825         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5826                 write_op(tp, (cpu_scratch_base +
5827                               (info->text_base & 0xffff) +
5828                               (i * sizeof(u32))),
5829                          (info->text_data ?
5830                           info->text_data[i] : 0));
5831         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5832                 write_op(tp, (cpu_scratch_base +
5833                               (info->rodata_base & 0xffff) +
5834                               (i * sizeof(u32))),
5835                          (info->rodata_data ?
5836                           info->rodata_data[i] : 0));
5837         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5838                 write_op(tp, (cpu_scratch_base +
5839                               (info->data_base & 0xffff) +
5840                               (i * sizeof(u32))),
5841                          (info->data_data ?
5842                           info->data_data[i] : 0));
5843
5844         err = 0;
5845
5846 out:
5847         return err;
5848 }
5849
5850 /* tp->lock is held. */
5851 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5852 {
5853         struct fw_info info;
5854         int err, i;
5855
5856         info.text_base = TG3_FW_TEXT_ADDR;
5857         info.text_len = TG3_FW_TEXT_LEN;
5858         info.text_data = &tg3FwText[0];
5859         info.rodata_base = TG3_FW_RODATA_ADDR;
5860         info.rodata_len = TG3_FW_RODATA_LEN;
5861         info.rodata_data = &tg3FwRodata[0];
5862         info.data_base = TG3_FW_DATA_ADDR;
5863         info.data_len = TG3_FW_DATA_LEN;
5864         info.data_data = NULL;
5865
5866         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5867                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5868                                     &info);
5869         if (err)
5870                 return err;
5871
5872         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5873                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5874                                     &info);
5875         if (err)
5876                 return err;
5877
5878         /* Now startup only the RX cpu. */
5879         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5880         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5881
5882         for (i = 0; i < 5; i++) {
5883                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5884                         break;
5885                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5886                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5887                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5888                 udelay(1000);
5889         }
5890         if (i >= 5) {
5891                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5892                        "to set RX CPU PC, is %08x should be %08x\n",
5893                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5894                        TG3_FW_TEXT_ADDR);
5895                 return -ENODEV;
5896         }
5897         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5898         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5899
5900         return 0;
5901 }
5902
5903
5904 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5905 #define TG3_TSO_FW_RELASE_MINOR         0x6
5906 #define TG3_TSO_FW_RELEASE_FIX          0x0
5907 #define TG3_TSO_FW_START_ADDR           0x08000000
5908 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5909 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5910 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5911 #define TG3_TSO_FW_RODATA_LEN           0x60
5912 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5913 #define TG3_TSO_FW_DATA_LEN             0x30
5914 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5915 #define TG3_TSO_FW_SBSS_LEN             0x2c
5916 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5917 #define TG3_TSO_FW_BSS_LEN              0x894
5918
5919 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5920         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5921         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5922         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5923         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5924         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5925         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5926         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5927         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5928         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5929         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5930         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5931         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5932         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5933         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5934         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5935         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5936         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5937         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5938         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5939         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5940         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5941         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5942         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5943         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5944         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5945         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5946         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5947         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5948         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5949         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5950         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5951         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5952         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5953         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5954         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5955         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5956         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5957         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5958         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5959         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5960         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5961         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5962         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5963         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5964         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5965         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5966         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5967         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5968         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5969         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5970         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5971         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5972         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5973         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5974         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5975         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5976         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5977         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5978         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5979         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5980         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5981         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5982         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5983         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5984         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5985         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5986         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5987         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5988         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5989         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5990         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5991         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5992         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5993         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5994         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5995         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5996         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5997         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5998         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5999         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6000         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6001         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6002         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6003         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6004         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6005         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6006         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6007         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6008         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6009         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6010         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6011         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6012         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6013         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6014         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6015         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6016         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6017         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6018         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6019         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6020         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6021         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6022         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6023         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6024         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6025         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6026         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6027         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6028         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6029         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6030         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6031         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6032         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6033         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6034         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6035         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6036         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6037         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6038         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6039         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6040         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6041         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6042         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6043         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6044         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6045         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6046         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6047         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6048         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6049         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6050         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6051         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6052         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6053         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6054         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6055         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6056         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6057         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6058         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6059         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6060         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6061         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6062         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6063         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6064         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6065         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6066         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6067         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6068         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6069         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6070         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6071         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6072         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6073         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6074         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6075         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6076         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6077         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6078         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6079         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6080         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6081         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6082         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6083         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6084         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6085         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6086         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6087         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6088         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6089         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6090         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6091         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6092         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6093         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6094         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6095         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6096         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6097         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6098         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6099         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6100         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6101         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6102         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6103         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6104         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6105         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6106         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6107         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6108         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6109         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6110         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6111         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6112         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6113         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6114         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6115         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6116         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6117         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6118         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6119         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6120         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6121         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6122         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6123         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6124         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6125         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6126         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6127         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6128         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6129         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6130         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6131         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6132         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6133         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6134         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6135         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6136         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6137         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6138         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6139         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6140         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6141         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6142         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6143         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6144         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6145         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6146         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6147         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6148         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6149         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6150         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6151         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6152         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6153         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6154         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6155         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6156         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6157         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6158         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6159         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6160         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6161         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6162         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6163         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6164         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6165         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6166         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6167         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6168         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6169         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6170         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6171         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6172         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6173         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6174         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6175         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6176         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6177         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6178         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6179         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6180         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6181         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6182         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6183         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6184         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6185         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6186         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6187         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6188         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6189         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6190         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6191         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6192         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6193         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6194         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6195         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6196         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6197         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6198         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6199         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6200         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6201         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6202         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6203         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6204 };
6205
/* Read-only data segment of the TSO firmware image (loaded at
 * TG3_TSO_FW_RODATA_ADDR).  The words are ASCII string constants used by
 * the firmware itself, e.g. "MainCpuB", "MainCpuA", "stkoffldIn",
 * "stkoff**", "SwEvent0", "fatalErr".  Must not be modified.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6213
/* Initialized data segment of the TSO firmware image (loaded at
 * TG3_TSO_FW_DATA_ADDR).  Decodes to the ASCII tag "stkoffld_v1.6.0"
 * followed by zero padding.  Must not be modified.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6219
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* NOTE(review): "RELASE" is a typo for "RELEASE", but the identifier is
 * referenced under this exact name elsewhere in this file, so it is kept
 * as-is here; renaming it must be done in one pass over all users.
 */
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* Link-time layout of the TSO5 firmware image in NIC SRAM.  The sections
 * are laid out sequentially (with alignment padding): text at 0x10000 for
 * 0xe90 bytes, so rodata starts at 0x10000 + 0xe90 = 0x10e90, and so on
 * for data, sbss and bss.
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6235
/* Text (code) segment of the 5705-specific TSO firmware image, loaded at
 * TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware().  Each word is one
 * instruction for the on-chip CPU; the blob is opaque to the driver and
 * must be preserved bit-for-bit.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6394
/* Read-only data segment of the 5705 TSO firmware image (loaded at
 * TG3_TSO5_FW_RODATA_ADDR).  ASCII string constants used by the firmware:
 * "MainCpuB", "MainCpuA", "stkoffld" (twice), "fatalErr".  Must not be
 * modified.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6401
/* Initialized data segment of the 5705 TSO firmware image (loaded at
 * TG3_TSO5_FW_DATA_ADDR).  Decodes to the ASCII tag "stkoffld_v1.2.0"
 * followed by zero padding.  Must not be modified.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6406
/* tp->lock is held. */
/* Download the TSO offload firmware into one of the NIC's on-chip
 * CPUs and start it running.
 *
 * Chips that do TSO in hardware (TG3_FLG2_HW_TSO) need no firmware,
 * so this returns 0 immediately for them.  On 5705 parts the 5705
 * specific image is loaded into the RX CPU, using a slice of the
 * MBUF pool SRAM as scratch space; all other chips get the generic
 * image loaded into the TX CPU scratch area.
 *
 * Returns 0 on success, or a negative errno if the image could not
 * be loaded or the CPU failed to start at the firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: RX CPU, scratch carved out of the MBUF pool. */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* Everything else: TX CPU with its dedicated scratch. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Give the CPU up to five attempts (1 ms apart) to latch the
	 * firmware entry point; re-halt and rewrite the PC on each retry.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear the HALT bit written above so the CPU starts executing. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
6478
6479
6480 /* tp->lock is held. */
6481 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6482 {
6483         u32 addr_high, addr_low;
6484         int i;
6485
6486         addr_high = ((tp->dev->dev_addr[0] << 8) |
6487                      tp->dev->dev_addr[1]);
6488         addr_low = ((tp->dev->dev_addr[2] << 24) |
6489                     (tp->dev->dev_addr[3] << 16) |
6490                     (tp->dev->dev_addr[4] <<  8) |
6491                     (tp->dev->dev_addr[5] <<  0));
6492         for (i = 0; i < 4; i++) {
6493                 if (i == 1 && skip_mac_1)
6494                         continue;
6495                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6496                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6497         }
6498
6499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6500             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6501                 for (i = 0; i < 12; i++) {
6502                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6503                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6504                 }
6505         }
6506
6507         addr_high = (tp->dev->dev_addr[0] +
6508                      tp->dev->dev_addr[1] +
6509                      tp->dev->dev_addr[2] +
6510                      tp->dev->dev_addr[3] +
6511                      tp->dev->dev_addr[4] +
6512                      tp->dev->dev_addr[5]) &
6513                 TX_BACKOFF_SEED_MASK;
6514         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6515 }
6516
6517 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6518 {
6519         struct tg3 *tp = netdev_priv(dev);
6520         struct sockaddr *addr = p;
6521         int err = 0, skip_mac_1 = 0;
6522
6523         if (!is_valid_ether_addr(addr->sa_data))
6524                 return -EINVAL;
6525
6526         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6527
6528         if (!netif_running(dev))
6529                 return 0;
6530
6531         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6532                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6533
6534                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6535                 addr0_low = tr32(MAC_ADDR_0_LOW);
6536                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6537                 addr1_low = tr32(MAC_ADDR_1_LOW);
6538
6539                 /* Skip MAC addr 1 if ASF is using it. */
6540                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6541                     !(addr1_high == 0 && addr1_low == 0))
6542                         skip_mac_1 = 1;
6543         }
6544         spin_lock_bh(&tp->lock);
6545         __tg3_set_mac_addr(tp, skip_mac_1);
6546         spin_unlock_bh(&tp->lock);
6547
6548         return err;
6549 }
6550
6551 /* tp->lock is held. */
6552 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6553                            dma_addr_t mapping, u32 maxlen_flags,
6554                            u32 nic_addr)
6555 {
6556         tg3_write_mem(tp,
6557                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6558                       ((u64) mapping >> 32));
6559         tg3_write_mem(tp,
6560                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6561                       ((u64) mapping & 0xffffffff));
6562         tg3_write_mem(tp,
6563                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6564                        maxlen_flags);
6565
6566         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6567                 tg3_write_mem(tp,
6568                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6569                               nic_addr);
6570 }
6571
6572 static void __tg3_set_rx_mode(struct net_device *);
6573 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6574 {
6575         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6576         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6577         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6578         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6579         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6580                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6581                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6582         }
6583         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6584         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6585         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6586                 u32 val = ec->stats_block_coalesce_usecs;
6587
6588                 if (!netif_carrier_ok(tp->dev))
6589                         val = 0;
6590
6591                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6592         }
6593 }
6594
6595 /* tp->lock is held. */
6596 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6597 {
6598         u32 val, rdmac_mode;
6599         int i, err, limit;
6600
6601         tg3_disable_ints(tp);
6602
6603         tg3_stop_fw(tp);
6604
6605         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6606
6607         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6608                 tg3_abort_hw(tp, 1);
6609         }
6610
6611         if (reset_phy)
6612                 tg3_phy_reset(tp);
6613
6614         err = tg3_chip_reset(tp);
6615         if (err)
6616                 return err;
6617
6618         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6619
6620         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6621             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6622                 val = tr32(TG3_CPMU_CTRL);
6623                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6624                 tw32(TG3_CPMU_CTRL, val);
6625
6626                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6627                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6628                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6629                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6630
6631                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6632                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6633                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6634                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6635
6636                 val = tr32(TG3_CPMU_HST_ACC);
6637                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6638                 val |= CPMU_HST_ACC_MACCLK_6_25;
6639                 tw32(TG3_CPMU_HST_ACC, val);
6640         }
6641
6642         /* This works around an issue with Athlon chipsets on
6643          * B3 tigon3 silicon.  This bit has no effect on any
6644          * other revision.  But do not set this on PCI Express
6645          * chips and don't even touch the clocks if the CPMU is present.
6646          */
6647         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6648                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6649                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6650                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6651         }
6652
6653         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6654             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6655                 val = tr32(TG3PCI_PCISTATE);
6656                 val |= PCISTATE_RETRY_SAME_DMA;
6657                 tw32(TG3PCI_PCISTATE, val);
6658         }
6659
6660         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6661                 /* Allow reads and writes to the
6662                  * APE register and memory space.
6663                  */
6664                 val = tr32(TG3PCI_PCISTATE);
6665                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6666                        PCISTATE_ALLOW_APE_SHMEM_WR;
6667                 tw32(TG3PCI_PCISTATE, val);
6668         }
6669
6670         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6671                 /* Enable some hw fixes.  */
6672                 val = tr32(TG3PCI_MSI_DATA);
6673                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6674                 tw32(TG3PCI_MSI_DATA, val);
6675         }
6676
6677         /* Descriptor ring init may make accesses to the
6678          * NIC SRAM area to setup the TX descriptors, so we
6679          * can only do this after the hardware has been
6680          * successfully reset.
6681          */
6682         err = tg3_init_rings(tp);
6683         if (err)
6684                 return err;
6685
6686         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6687             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6688                 /* This value is determined during the probe time DMA
6689                  * engine test, tg3_test_dma.
6690                  */
6691                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6692         }
6693
6694         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6695                           GRC_MODE_4X_NIC_SEND_RINGS |
6696                           GRC_MODE_NO_TX_PHDR_CSUM |
6697                           GRC_MODE_NO_RX_PHDR_CSUM);
6698         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6699
6700         /* Pseudo-header checksum is done by hardware logic and not
6701          * the offload processers, so make the chip do the pseudo-
6702          * header checksums on receive.  For transmit it is more
6703          * convenient to do the pseudo-header checksum in software
6704          * as Linux does that on transmit for us in all cases.
6705          */
6706         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6707
6708         tw32(GRC_MODE,
6709              tp->grc_mode |
6710              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6711
6712         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6713         val = tr32(GRC_MISC_CFG);
6714         val &= ~0xff;
6715         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6716         tw32(GRC_MISC_CFG, val);
6717
6718         /* Initialize MBUF/DESC pool. */
6719         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6720                 /* Do nothing.  */
6721         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6722                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6724                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6725                 else
6726                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6727                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6728                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6729         }
6730         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6731                 int fw_len;
6732
6733                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6734                           TG3_TSO5_FW_RODATA_LEN +
6735                           TG3_TSO5_FW_DATA_LEN +
6736                           TG3_TSO5_FW_SBSS_LEN +
6737                           TG3_TSO5_FW_BSS_LEN);
6738                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6739                 tw32(BUFMGR_MB_POOL_ADDR,
6740                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6741                 tw32(BUFMGR_MB_POOL_SIZE,
6742                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6743         }
6744
6745         if (tp->dev->mtu <= ETH_DATA_LEN) {
6746                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6747                      tp->bufmgr_config.mbuf_read_dma_low_water);
6748                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6749                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6750                 tw32(BUFMGR_MB_HIGH_WATER,
6751                      tp->bufmgr_config.mbuf_high_water);
6752         } else {
6753                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6754                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6755                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6756                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6757                 tw32(BUFMGR_MB_HIGH_WATER,
6758                      tp->bufmgr_config.mbuf_high_water_jumbo);
6759         }
6760         tw32(BUFMGR_DMA_LOW_WATER,
6761              tp->bufmgr_config.dma_low_water);
6762         tw32(BUFMGR_DMA_HIGH_WATER,
6763              tp->bufmgr_config.dma_high_water);
6764
6765         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6766         for (i = 0; i < 2000; i++) {
6767                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6768                         break;
6769                 udelay(10);
6770         }
6771         if (i >= 2000) {
6772                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6773                        tp->dev->name);
6774                 return -ENODEV;
6775         }
6776
6777         /* Setup replenish threshold. */
6778         val = tp->rx_pending / 8;
6779         if (val == 0)
6780                 val = 1;
6781         else if (val > tp->rx_std_max_post)
6782                 val = tp->rx_std_max_post;
6783         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6784                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6785                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6786
6787                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6788                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6789         }
6790
6791         tw32(RCVBDI_STD_THRESH, val);
6792
6793         /* Initialize TG3_BDINFO's at:
6794          *  RCVDBDI_STD_BD:     standard eth size rx ring
6795          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6796          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6797          *
6798          * like so:
6799          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6800          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6801          *                              ring attribute flags
6802          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6803          *
6804          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6805          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6806          *
6807          * The size of each ring is fixed in the firmware, but the location is
6808          * configurable.
6809          */
6810         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6811              ((u64) tp->rx_std_mapping >> 32));
6812         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6813              ((u64) tp->rx_std_mapping & 0xffffffff));
6814         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6815              NIC_SRAM_RX_BUFFER_DESC);
6816
6817         /* Don't even try to program the JUMBO/MINI buffer descriptor
6818          * configs on 5705.
6819          */
6820         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6821                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6822                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6823         } else {
6824                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6825                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6826
6827                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6828                      BDINFO_FLAGS_DISABLED);
6829
6830                 /* Setup replenish threshold. */
6831                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6832
6833                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6834                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6835                              ((u64) tp->rx_jumbo_mapping >> 32));
6836                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6837                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6838                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6839                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6840                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6841                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6842                 } else {
6843                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6844                              BDINFO_FLAGS_DISABLED);
6845                 }
6846
6847         }
6848
6849         /* There is only one send ring on 5705/5750, no need to explicitly
6850          * disable the others.
6851          */
6852         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6853                 /* Clear out send RCB ring in SRAM. */
6854                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6855                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6856                                       BDINFO_FLAGS_DISABLED);
6857         }
6858
6859         tp->tx_prod = 0;
6860         tp->tx_cons = 0;
6861         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6862         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6863
6864         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6865                        tp->tx_desc_mapping,
6866                        (TG3_TX_RING_SIZE <<
6867                         BDINFO_FLAGS_MAXLEN_SHIFT),
6868                        NIC_SRAM_TX_BUFFER_DESC);
6869
6870         /* There is only one receive return ring on 5705/5750, no need
6871          * to explicitly disable the others.
6872          */
6873         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6874                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6875                      i += TG3_BDINFO_SIZE) {
6876                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6877                                       BDINFO_FLAGS_DISABLED);
6878                 }
6879         }
6880
6881         tp->rx_rcb_ptr = 0;
6882         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6883
6884         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6885                        tp->rx_rcb_mapping,
6886                        (TG3_RX_RCB_RING_SIZE(tp) <<
6887                         BDINFO_FLAGS_MAXLEN_SHIFT),
6888                        0);
6889
6890         tp->rx_std_ptr = tp->rx_pending;
6891         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6892                      tp->rx_std_ptr);
6893
6894         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6895                                                 tp->rx_jumbo_pending : 0;
6896         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6897                      tp->rx_jumbo_ptr);
6898
6899         /* Initialize MAC address and backoff seed. */
6900         __tg3_set_mac_addr(tp, 0);
6901
6902         /* MTU + ethernet header + FCS + optional VLAN tag */
6903         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6904
6905         /* The slot time is changed by tg3_setup_phy if we
6906          * run at gigabit with half duplex.
6907          */
6908         tw32(MAC_TX_LENGTHS,
6909              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6910              (6 << TX_LENGTHS_IPG_SHIFT) |
6911              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6912
6913         /* Receive rules. */
6914         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6915         tw32(RCVLPC_CONFIG, 0x0181);
6916
6917         /* Calculate RDMAC_MODE setting early, we need it to determine
6918          * the RCVLPC_STATE_ENABLE mask.
6919          */
6920         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6921                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6922                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6923                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6924                       RDMAC_MODE_LNGREAD_ENAB);
6925
6926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6927                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6928                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6929                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6930
6931         /* If statement applies to 5705 and 5750 PCI devices only */
6932         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6933              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6934             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6935                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6936                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6937                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6938                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6939                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6940                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6941                 }
6942         }
6943
6944         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6945                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6946
6947         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6948                 rdmac_mode |= (1 << 27);
6949
6950         /* Receive/send statistics. */
6951         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6952                 val = tr32(RCVLPC_STATS_ENABLE);
6953                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6954                 tw32(RCVLPC_STATS_ENABLE, val);
6955         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6956                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6957                 val = tr32(RCVLPC_STATS_ENABLE);
6958                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6959                 tw32(RCVLPC_STATS_ENABLE, val);
6960         } else {
6961                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6962         }
6963         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6964         tw32(SNDDATAI_STATSENAB, 0xffffff);
6965         tw32(SNDDATAI_STATSCTRL,
6966              (SNDDATAI_SCTRL_ENABLE |
6967               SNDDATAI_SCTRL_FASTUPD));
6968
6969         /* Setup host coalescing engine. */
6970         tw32(HOSTCC_MODE, 0);
6971         for (i = 0; i < 2000; i++) {
6972                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6973                         break;
6974                 udelay(10);
6975         }
6976
6977         __tg3_set_coalesce(tp, &tp->coal);
6978
6979         /* set status block DMA address */
6980         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6981              ((u64) tp->status_mapping >> 32));
6982         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6983              ((u64) tp->status_mapping & 0xffffffff));
6984
6985         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6986                 /* Status/statistics block address.  See tg3_timer,
6987                  * the tg3_periodic_fetch_stats call there, and
6988                  * tg3_get_stats to see how this works for 5705/5750 chips.
6989                  */
6990                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6991                      ((u64) tp->stats_mapping >> 32));
6992                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6993                      ((u64) tp->stats_mapping & 0xffffffff));
6994                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6995                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6996         }
6997
6998         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6999
7000         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7001         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7002         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7003                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7004
7005         /* Clear statistics/status block in chip, and status block in ram. */
7006         for (i = NIC_SRAM_STATS_BLK;
7007              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7008              i += sizeof(u32)) {
7009                 tg3_write_mem(tp, i, 0);
7010                 udelay(40);
7011         }
7012         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7013
7014         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7015                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7016                 /* reset to prevent losing 1st rx packet intermittently */
7017                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7018                 udelay(10);
7019         }
7020
7021         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7022                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7023         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7024             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7025             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7026                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7027         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7028         udelay(40);
7029
7030         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7031          * If TG3_FLG2_IS_NIC is zero, we should read the
7032          * register to preserve the GPIO settings for LOMs. The GPIOs,
7033          * whether used as inputs or outputs, are set by boot code after
7034          * reset.
7035          */
7036         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7037                 u32 gpio_mask;
7038
7039                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7040                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7041                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7042
7043                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7044                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7045                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7046
7047                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7048                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7049
7050                 tp->grc_local_ctrl &= ~gpio_mask;
7051                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7052
7053                 /* GPIO1 must be driven high for eeprom write protect */
7054                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7055                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7056                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7057         }
7058         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7059         udelay(100);
7060
7061         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7062         tp->last_tag = 0;
7063
7064         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7065                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7066                 udelay(40);
7067         }
7068
7069         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7070                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7071                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7072                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7073                WDMAC_MODE_LNGREAD_ENAB);
7074
7075         /* If statement applies to 5705 and 5750 PCI devices only */
7076         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7077              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7078             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7079                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7080                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7081                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7082                         /* nothing */
7083                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7084                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7085                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7086                         val |= WDMAC_MODE_RX_ACCEL;
7087                 }
7088         }
7089
7090         /* Enable host coalescing bug fix */
7091         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7092             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7093             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7094             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7095                 val |= (1 << 29);
7096
7097         tw32_f(WDMAC_MODE, val);
7098         udelay(40);
7099
7100         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7101                 u16 pcix_cmd;
7102
7103                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7104                                      &pcix_cmd);
7105                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7106                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7107                         pcix_cmd |= PCI_X_CMD_READ_2K;
7108                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7109                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7110                         pcix_cmd |= PCI_X_CMD_READ_2K;
7111                 }
7112                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7113                                       pcix_cmd);
7114         }
7115
7116         tw32_f(RDMAC_MODE, rdmac_mode);
7117         udelay(40);
7118
7119         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7120         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7121                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7122
7123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7124                 tw32(SNDDATAC_MODE,
7125                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7126         else
7127                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7128
7129         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7130         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7131         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7132         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7133         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7134                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7135         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7136         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7137
7138         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7139                 err = tg3_load_5701_a0_firmware_fix(tp);
7140                 if (err)
7141                         return err;
7142         }
7143
7144         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7145                 err = tg3_load_tso_firmware(tp);
7146                 if (err)
7147                         return err;
7148         }
7149
7150         tp->tx_mode = TX_MODE_ENABLE;
7151         tw32_f(MAC_TX_MODE, tp->tx_mode);
7152         udelay(100);
7153
7154         tp->rx_mode = RX_MODE_ENABLE;
7155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7156             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7157                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7158
7159         tw32_f(MAC_RX_MODE, tp->rx_mode);
7160         udelay(10);
7161
7162         if (tp->link_config.phy_is_low_power) {
7163                 tp->link_config.phy_is_low_power = 0;
7164                 tp->link_config.speed = tp->link_config.orig_speed;
7165                 tp->link_config.duplex = tp->link_config.orig_duplex;
7166                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7167         }
7168
7169         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7170         tw32_f(MAC_MI_MODE, tp->mi_mode);
7171         udelay(80);
7172
7173         tw32(MAC_LED_CTRL, tp->led_ctrl);
7174
7175         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7176         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7177                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7178                 udelay(10);
7179         }
7180         tw32_f(MAC_RX_MODE, tp->rx_mode);
7181         udelay(10);
7182
7183         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7184                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7185                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7186                         /* Set drive transmission level to 1.2V  */
7187                         /* only if the signal pre-emphasis bit is not set  */
7188                         val = tr32(MAC_SERDES_CFG);
7189                         val &= 0xfffff000;
7190                         val |= 0x880;
7191                         tw32(MAC_SERDES_CFG, val);
7192                 }
7193                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7194                         tw32(MAC_SERDES_CFG, 0x616000);
7195         }
7196
7197         /* Prevent chip from dropping frames when flow control
7198          * is enabled.
7199          */
7200         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7201
7202         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7203             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7204                 /* Use hardware link auto-negotiation */
7205                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7206         }
7207
7208         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7209             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7210                 u32 tmp;
7211
7212                 tmp = tr32(SERDES_RX_CTRL);
7213                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7214                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7215                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7216                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7217         }
7218
7219         err = tg3_setup_phy(tp, 0);
7220         if (err)
7221                 return err;
7222
7223         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7224             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7225                 u32 tmp;
7226
7227                 /* Clear CRC stats. */
7228                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7229                         tg3_writephy(tp, MII_TG3_TEST1,
7230                                      tmp | MII_TG3_TEST1_CRC_EN);
7231                         tg3_readphy(tp, 0x14, &tmp);
7232                 }
7233         }
7234
7235         __tg3_set_rx_mode(tp->dev);
7236
7237         /* Initialize receive rules. */
7238         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7239         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7240         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7241         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7242
7243         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7244             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7245                 limit = 8;
7246         else
7247                 limit = 16;
7248         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7249                 limit -= 4;
7250         switch (limit) {
7251         case 16:
7252                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7253         case 15:
7254                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7255         case 14:
7256                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7257         case 13:
7258                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7259         case 12:
7260                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7261         case 11:
7262                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7263         case 10:
7264                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7265         case 9:
7266                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7267         case 8:
7268                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7269         case 7:
7270                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7271         case 6:
7272                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7273         case 5:
7274                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7275         case 4:
7276                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7277         case 3:
7278                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7279         case 2:
7280         case 1:
7281
7282         default:
7283                 break;
7284         };
7285
7286         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7287                 /* Write our heartbeat update interval to APE. */
7288                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7289                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7290
7291         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7292
7293         return 0;
7294 }
7295
7296 /* Called at device open time to get the chip ready for
7297  * packet processing.  Invoked with tp->lock held.
7298  */
7299 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7300 {
7301         int err;
7302
7303         /* Force the chip into D0. */
7304         err = tg3_set_power_state(tp, PCI_D0);
7305         if (err)
7306                 goto out;
7307
7308         tg3_switch_clocks(tp);
7309
7310         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7311
7312         err = tg3_reset_hw(tp, reset_phy);
7313
7314 out:
7315         return err;
7316 }
7317
/* Fold the 32-bit value read from hardware counter register REG into
 * the 64-bit software statistic PSTAT (a {low, high} pair).  If the
 * addition wraps the low word (detected by low < addend after the
 * add), carry one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7324
/* Accumulate the chip's 32-bit hardware statistics counters into the
 * 64-bit software copies in tp->hw_stats.  Called from the once-per-
 * second portion of tg3_timer() on 5705-plus chips.  Skipped while the
 * carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* No link: nothing to accumulate. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7365
/* Per-device watchdog timer, re-armed every tp->timer_offset jiffies.
 * Three jobs: the non-tagged-status interrupt race workaround (every
 * tick), link polling and statistics fetching (once per second), and
 * the ASF firmware heartbeat (once every 2 seconds).
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being synchronized elsewhere; skip this tick
	 * and just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block pending: prod the chip to raise
			 * the interrupt again.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise force an immediate coalescing event. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine stopped unexpectedly: schedule a full
		 * chip reset from process context and bail out without
		 * re-arming (the reset task restarts the timer).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Detect a PHY/link event from MAC_STATUS and
			 * reconfigure the PHY if one occurred.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but state changed, or link was down
			 * and a signal/sync is now present: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode bits to
					 * reset the link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware doorbell. */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= GRC_RX_CPU_DRIVER_EVENT;
			tw32_f(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7487
7488 static int tg3_request_irq(struct tg3 *tp)
7489 {
7490         irq_handler_t fn;
7491         unsigned long flags;
7492         struct net_device *dev = tp->dev;
7493
7494         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7495                 fn = tg3_msi;
7496                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7497                         fn = tg3_msi_1shot;
7498                 flags = IRQF_SAMPLE_RANDOM;
7499         } else {
7500                 fn = tg3_interrupt;
7501                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7502                         fn = tg3_interrupt_tagged;
7503                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7504         }
7505         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7506 }
7507
/* Verify that the chip can deliver an interrupt to the host.
 * Temporarily installs tg3_test_isr in place of the normal handler,
 * forces a host-coalescing "interrupt now" event, then polls for up
 * to ~50ms for evidence the interrupt fired (a non-zero interrupt
 * mailbox, or MISC_HOST_CTRL_MASK_PCI_INT set).  The normal handler
 * is restored before returning.  Returns 0 if an interrupt was seen,
 * -EIO if not, or a request_irq() error code.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate host-coalescing interrupt. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the driver's normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7561
7562 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7563  * successfully restored
7564  */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	/* Tear down MSI and re-register the legacy handler. */
	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If reinit failed, release the IRQ; caller sees the error. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7622
/* net_device open hook: power up the chip, allocate descriptor memory,
 * set up the IRQ (preferring MSI when the chip supports it), program
 * the hardware and start the per-device timer.  Returns 0 or a
 * negative errno, unwinding all intermediate state on failure.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Force the chip into D0 before touching it further. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Unwind: disable MSI and free descriptor memory. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status permits a slow 1Hz timer; otherwise
		 * tick at 10Hz for the status-block race workaround.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat counter: fires every 2 seconds. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		/* Unwind NAPI, IRQ, MSI and descriptor memory. */
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm MSI delivery actually works; tg3_test_msi()
		 * falls back to INTx and resets the chip on failure.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	/* Everything is ready: arm the timer and enable interrupts. */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7758
7759 #if 0
7760 /*static*/ void tg3_dump_state(struct tg3 *tp)
7761 {
7762         u32 val32, val32_2, val32_3, val32_4, val32_5;
7763         u16 val16;
7764         int i;
7765
7766         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7767         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7768         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7769                val16, val32);
7770
7771         /* MAC block */
7772         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7773                tr32(MAC_MODE), tr32(MAC_STATUS));
7774         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7775                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7776         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7777                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7778         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7779                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7780
7781         /* Send data initiator control block */
7782         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7783                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7784         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7785                tr32(SNDDATAI_STATSCTRL));
7786
7787         /* Send data completion control block */
7788         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7789
7790         /* Send BD ring selector block */
7791         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7792                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7793
7794         /* Send BD initiator control block */
7795         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7796                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7797
7798         /* Send BD completion control block */
7799         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7800
7801         /* Receive list placement control block */
7802         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7803                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7804         printk("       RCVLPC_STATSCTRL[%08x]\n",
7805                tr32(RCVLPC_STATSCTRL));
7806
7807         /* Receive data and receive BD initiator control block */
7808         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7809                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7810
7811         /* Receive data completion control block */
7812         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7813                tr32(RCVDCC_MODE));
7814
7815         /* Receive BD initiator control block */
7816         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7817                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7818
7819         /* Receive BD completion control block */
7820         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7821                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7822
7823         /* Receive list selector control block */
7824         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7825                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7826
7827         /* Mbuf cluster free block */
7828         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7829                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7830
7831         /* Host coalescing control block */
7832         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7833                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7834         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7835                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7836                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7837         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7838                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7839                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7840         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7841                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7842         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7843                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7844
7845         /* Memory arbiter control block */
7846         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7847                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7848
7849         /* Buffer manager control block */
7850         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7851                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7852         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7853                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7854         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7855                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7856                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7857                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7858
7859         /* Read DMA control block */
7860         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7861                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7862
7863         /* Write DMA control block */
7864         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7865                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7866
7867         /* DMA completion block */
7868         printk("DEBUG: DMAC_MODE[%08x]\n",
7869                tr32(DMAC_MODE));
7870
7871         /* GRC block */
7872         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7873                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7874         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7875                tr32(GRC_LOCAL_CTRL));
7876
7877         /* TG3_BDINFOs */
7878         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7879                tr32(RCVDBDI_JUMBO_BD + 0x0),
7880                tr32(RCVDBDI_JUMBO_BD + 0x4),
7881                tr32(RCVDBDI_JUMBO_BD + 0x8),
7882                tr32(RCVDBDI_JUMBO_BD + 0xc));
7883         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7884                tr32(RCVDBDI_STD_BD + 0x0),
7885                tr32(RCVDBDI_STD_BD + 0x4),
7886                tr32(RCVDBDI_STD_BD + 0x8),
7887                tr32(RCVDBDI_STD_BD + 0xc));
7888         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7889                tr32(RCVDBDI_MINI_BD + 0x0),
7890                tr32(RCVDBDI_MINI_BD + 0x4),
7891                tr32(RCVDBDI_MINI_BD + 0x8),
7892                tr32(RCVDBDI_MINI_BD + 0xc));
7893
7894         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7895         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7896         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7897         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7898         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7899                val32, val32_2, val32_3, val32_4);
7900
7901         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7902         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7903         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7904         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7905         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7906                val32, val32_2, val32_3, val32_4);
7907
7908         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7909         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7910         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7911         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7912         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7913         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7914                val32, val32_2, val32_3, val32_4, val32_5);
7915
7916         /* SW status block */
7917         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7918                tp->hw_status->status,
7919                tp->hw_status->status_tag,
7920                tp->hw_status->rx_jumbo_consumer,
7921                tp->hw_status->rx_consumer,
7922                tp->hw_status->rx_mini_consumer,
7923                tp->hw_status->idx[0].rx_producer,
7924                tp->hw_status->idx[0].tx_consumer);
7925
7926         /* SW statistics block */
7927         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7928                ((u32 *)tp->hw_stats)[0],
7929                ((u32 *)tp->hw_stats)[1],
7930                ((u32 *)tp->hw_stats)[2],
7931                ((u32 *)tp->hw_stats)[3]);
7932
7933         /* Mailboxes */
7934         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7935                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7936                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7937                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7938                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7939
7940         /* NIC side send descriptors. */
7941         for (i = 0; i < 6; i++) {
7942                 unsigned long txd;
7943
7944                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7945                         + (i * sizeof(struct tg3_tx_buffer_desc));
7946                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7947                        i,
7948                        readl(txd + 0x0), readl(txd + 0x4),
7949                        readl(txd + 0x8), readl(txd + 0xc));
7950         }
7951
7952         /* NIC side RX descriptors. */
7953         for (i = 0; i < 6; i++) {
7954                 unsigned long rxd;
7955
7956                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7957                         + (i * sizeof(struct tg3_rx_buffer_desc));
7958                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7959                        i,
7960                        readl(rxd + 0x0), readl(rxd + 0x4),
7961                        readl(rxd + 0x8), readl(rxd + 0xc));
7962                 rxd += (4 * sizeof(u32));
7963                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7964                        i,
7965                        readl(rxd + 0x0), readl(rxd + 0x4),
7966                        readl(rxd + 0x8), readl(rxd + 0xc));
7967         }
7968
7969         for (i = 0; i < 6; i++) {
7970                 unsigned long rxd;
7971
7972                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7973                         + (i * sizeof(struct tg3_rx_buffer_desc));
7974                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7975                        i,
7976                        readl(rxd + 0x0), readl(rxd + 0x4),
7977                        readl(rxd + 0x8), readl(rxd + 0xc));
7978                 rxd += (4 * sizeof(u32));
7979                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7980                        i,
7981                        readl(rxd + 0x0), readl(rxd + 0x4),
7982                        readl(rxd + 0x8), readl(rxd + 0xc));
7983         }
7984 }
7985 #endif
7986
7987 static struct net_device_stats *tg3_get_stats(struct net_device *);
7988 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7989
/* netdev ->stop() hook: quiesce NAPI, the reset worker and the driver
 * timer, halt the chip, tear down rings and the IRQ, snapshot the
 * statistics, then drop the device into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop NAPI polling and any queued reset work before touching
	 * the hardware; either could otherwise race with the halt below.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* IRQ teardown happens outside the full lock. */
	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Fold the hardware counters into the *_prev snapshots one last
	 * time while the DMA statistics block is still mapped; it is
	 * freed by tg3_free_consistent() just below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8033
8034 static inline unsigned long get_stat64(tg3_stat64_t *val)
8035 {
8036         unsigned long ret;
8037
8038 #if (BITS_PER_LONG == 32)
8039         ret = val->low;
8040 #else
8041         ret = ((u64)val->high << 32) | ((u64)val->low);
8042 #endif
8043         return ret;
8044 }
8045
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper devices the count is read out of the PHY rather
 * than the MAC statistics block: MII_TG3_TEST1_CRC_EN is set and the
 * counter is then read from PHY register 0x14 and accumulated in
 * tp->phy_crc_errors.  All other devices use the hardware
 * rx_fcs_errors statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* NOTE(review): 0x14 presumably is the PHY's CRC
			 * error counter register and reading it appears to
			 * clear it -- confirm against the PHY datasheet.
			 */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8071
/* Add the live hardware counter for @member on top of the total saved
 * at the last close (tg3_close() snapshots into tp->estats_prev) and
 * store the sum in tp->estats.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh and return the cumulative ethtool statistics.  When the DMA
 * statistics block is not mapped (tp->hw_stats == NULL, i.e. device
 * closed), the pre-close snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive path counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit path counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive buffer/DMA resource counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Transmit buffer/DMA resource counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8163
/* netdev ->get_stats() hook: map the DMA statistics block onto
 * struct net_device_stats.  Totals accumulate on top of the snapshot
 * taken at the last close; while the device is closed (hw_stats ==
 * NULL) the snapshot itself is returned.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts;
	 * calc_crc_errors() hides that distinction.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8223
/* Bitwise CRC-32 over buf[0..len), reflected polynomial 0xedb88320,
 * initial value all-ones, final complement.  Used to derive the
 * multicast hash filter index.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return ~crc;
}
8248
8249 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8250 {
8251         /* accept or reject all multicast frames */
8252         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8253         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8254         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8255         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8256 }
8257
/* Program the MAC RX mode register and the multicast hash filter from
 * dev->flags and the device multicast list.  Caller is expected to
 * hold the full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low 7 bits of the inverted CRC-32
			 * of the MAC address: bits 6:5 select one of the
			 * four 32-bit hash registers, bits 4:0 select
			 * the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8321
/* netdev ->set_multicast_list() hook: reprogram the RX filters under
 * the full lock.  Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8333
/* Size in bytes of the buffer tg3_get_regs() fills. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len() hook: the register dump has a fixed size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8340
/* ethtool ->get_regs() hook: dump device registers into the
 * TG3_REGDUMP_LEN byte buffer at _p.  Each register is stored at its
 * own register offset within the dump; the buffer is pre-zeroed so
 * gaps between the dumped ranges read back as zero.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Leave the dump all-zero rather than touch registers while
	 * the PHY is in low power state.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at offset (base) in the dump, then read (len)
 * bytes worth of consecutive registers starting at (base).
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register into the dump at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist on devices with NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8413
8414 static int tg3_get_eeprom_len(struct net_device *dev)
8415 {
8416         struct tg3 *tp = netdev_priv(dev);
8417
8418         return tp->nvram_size;
8419 }
8420
8421 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8422 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8423 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8424
/* ethtool ->get_eeprom() hook: copy an arbitrary byte range of NVRAM
 * into @data.
 *
 * NVRAM can only be read in whole 4-byte words, so the request is
 * split into an unaligned head, aligned middle words and an unaligned
 * tail.  eeprom->len is updated to the number of bytes actually
 * delivered, including on a partial failure in the middle loop.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* Refuse NVRAM access while the PHY is in low power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole word containing the head and copy out
		 * just the requested bytes.
		 */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report how many bytes made it before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8484
8485 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8486
/* ethtool ->set_eeprom() hook: write an arbitrary byte range of @data
 * into NVRAM.
 *
 * NVRAM writes are word-based, so an unaligned start or end is handled
 * by read-modify-write: the existing boundary words are fetched, the
 * range is widened to 4-byte boundaries, and a bounce buffer is
 * assembled before tg3_nvram_write_block() does the programming.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* Refuse NVRAM access while the PHY is in low power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Assemble head word + user data + tail word in a
		 * kmalloc'd bounce buffer.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8544
/* ethtool ->get_settings() hook: report supported modes, port type and
 * the current link configuration.  Active speed/duplex are only filled
 * in while the interface is running.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes, unless this is a 10/100-only device. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper devices add the 10/100 TP modes; serdes devices only
	 * report fibre.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8579
/* ethtool ->set_settings() hook: validate and apply a new link
 * configuration.  Serdes (fibre) devices can only run at 1000 Mb/s;
 * copper devices cannot be *forced* to 1000 Mb/s (and 10/100-only
 * devices cannot do 1000 Mb/s at all).
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg chooses speed/duplex; invalidate forced values. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Preserve the requested configuration in the orig_* fields. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
8629
8630 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8631 {
8632         struct tg3 *tp = netdev_priv(dev);
8633
8634         strcpy(info->driver, DRV_MODULE_NAME);
8635         strcpy(info->version, DRV_MODULE_VERSION);
8636         strcpy(info->fw_version, tp->fw_ver);
8637         strcpy(info->bus_info, pci_name(tp->pdev));
8638 }
8639
8640 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8641 {
8642         struct tg3 *tp = netdev_priv(dev);
8643
8644         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8645                 wol->supported = WAKE_MAGIC;
8646         else
8647                 wol->supported = 0;
8648         wol->wolopts = 0;
8649         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8650                 wol->wolopts = WAKE_MAGIC;
8651         memset(&wol->sopass, 0, sizeof(wol->sopass));
8652 }
8653
8654 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8655 {
8656         struct tg3 *tp = netdev_priv(dev);
8657
8658         if (wol->wolopts & ~WAKE_MAGIC)
8659                 return -EINVAL;
8660         if ((wol->wolopts & WAKE_MAGIC) &&
8661             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8662                 return -EINVAL;
8663
8664         spin_lock_bh(&tp->lock);
8665         if (wol->wolopts & WAKE_MAGIC)
8666                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8667         else
8668                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8669         spin_unlock_bh(&tp->lock);
8670
8671         return 0;
8672 }
8673
8674 static u32 tg3_get_msglevel(struct net_device *dev)
8675 {
8676         struct tg3 *tp = netdev_priv(dev);
8677         return tp->msg_enable;
8678 }
8679
8680 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8681 {
8682         struct tg3 *tp = netdev_priv(dev);
8683         tp->msg_enable = value;
8684 }
8685
/* ethtool ->set_tso() hook: toggle TCP segmentation offload.
 *
 * Devices without TSO capability reject enable requests.  On HW_TSO_2
 * parts (other than the 5906) IPv6 TSO follows the IPv4 setting, and
 * the 5761 additionally gains TSO-with-ECN.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		/* Disabling TSO on a non-TSO device is a no-op. */
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	/* Let the generic helper flip NETIF_F_TSO itself. */
	return ethtool_op_set_tso(dev, value);
}
8706
/* ethtool ->nway_reset() hook: restart autonegotiation on the copper
 * PHY.  Fails when the interface is down, on serdes devices, or when
 * autoneg is not enabled in BMCR (unless parallel detect is active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): the back-to-back BMCR reads look deliberate --
	 * the first read presumably flushes a stale/latched value so the
	 * second reflects current state; confirm before "simplifying".
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8733
8734 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8735 {
8736         struct tg3 *tp = netdev_priv(dev);
8737
8738         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8739         ering->rx_mini_max_pending = 0;
8740         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8741                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8742         else
8743                 ering->rx_jumbo_max_pending = 0;
8744
8745         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8746
8747         ering->rx_pending = tp->rx_pending;
8748         ering->rx_mini_pending = 0;
8749         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8750                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8751         else
8752                 ering->rx_jumbo_pending = 0;
8753
8754         ering->tx_pending = tp->tx_pending;
8755 }
8756
8757 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8758 {
8759         struct tg3 *tp = netdev_priv(dev);
8760         int irq_sync = 0, err = 0;
8761
8762         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8763             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8764             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8765             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8766             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8767              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8768                 return -EINVAL;
8769
8770         if (netif_running(dev)) {
8771                 tg3_netif_stop(tp);
8772                 irq_sync = 1;
8773         }
8774
8775         tg3_full_lock(tp, irq_sync);
8776
8777         tp->rx_pending = ering->rx_pending;
8778
8779         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8780             tp->rx_pending > 63)
8781                 tp->rx_pending = 63;
8782         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8783         tp->tx_pending = ering->tx_pending;
8784
8785         if (netif_running(dev)) {
8786                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8787                 err = tg3_restart_hw(tp, 1);
8788                 if (!err)
8789                         tg3_netif_start(tp);
8790         }
8791
8792         tg3_full_unlock(tp);
8793
8794         return err;
8795 }
8796
8797 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8798 {
8799         struct tg3 *tp = netdev_priv(dev);
8800
8801         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8802
8803         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8804                 epause->rx_pause = 1;
8805         else
8806                 epause->rx_pause = 0;
8807
8808         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8809                 epause->tx_pause = 1;
8810         else
8811                 epause->tx_pause = 0;
8812 }
8813
8814 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8815 {
8816         struct tg3 *tp = netdev_priv(dev);
8817         int irq_sync = 0, err = 0;
8818
8819         if (netif_running(dev)) {
8820                 tg3_netif_stop(tp);
8821                 irq_sync = 1;
8822         }
8823
8824         tg3_full_lock(tp, irq_sync);
8825
8826         if (epause->autoneg)
8827                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8828         else
8829                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8830         if (epause->rx_pause)
8831                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8832         else
8833                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8834         if (epause->tx_pause)
8835                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8836         else
8837                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8838
8839         if (netif_running(dev)) {
8840                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8841                 err = tg3_restart_hw(tp, 1);
8842                 if (!err)
8843                         tg3_netif_start(tp);
8844         }
8845
8846         tg3_full_unlock(tp);
8847
8848         return err;
8849 }
8850
8851 static u32 tg3_get_rx_csum(struct net_device *dev)
8852 {
8853         struct tg3 *tp = netdev_priv(dev);
8854         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8855 }
8856
8857 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8858 {
8859         struct tg3 *tp = netdev_priv(dev);
8860
8861         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8862                 if (data != 0)
8863                         return -EINVAL;
8864                 return 0;
8865         }
8866
8867         spin_lock_bh(&tp->lock);
8868         if (data)
8869                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8870         else
8871                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8872         spin_unlock_bh(&tp->lock);
8873
8874         return 0;
8875 }
8876
8877 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8878 {
8879         struct tg3 *tp = netdev_priv(dev);
8880
8881         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8882                 if (data != 0)
8883                         return -EINVAL;
8884                 return 0;
8885         }
8886
8887         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8890             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8891                 ethtool_op_set_tx_ipv6_csum(dev, data);
8892         else
8893                 ethtool_op_set_tx_csum(dev, data);
8894
8895         return 0;
8896 }
8897
8898 static int tg3_get_sset_count (struct net_device *dev, int sset)
8899 {
8900         switch (sset) {
8901         case ETH_SS_TEST:
8902                 return TG3_NUM_TEST;
8903         case ETH_SS_STATS:
8904                 return TG3_NUM_STATS;
8905         default:
8906                 return -EOPNOTSUPP;
8907         }
8908 }
8909
8910 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8911 {
8912         switch (stringset) {
8913         case ETH_SS_STATS:
8914                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8915                 break;
8916         case ETH_SS_TEST:
8917                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8918                 break;
8919         default:
8920                 WARN_ON(1);     /* we need a WARN() */
8921                 break;
8922         }
8923 }
8924
8925 static int tg3_phys_id(struct net_device *dev, u32 data)
8926 {
8927         struct tg3 *tp = netdev_priv(dev);
8928         int i;
8929
8930         if (!netif_running(tp->dev))
8931                 return -EAGAIN;
8932
8933         if (data == 0)
8934                 data = UINT_MAX / 2;
8935
8936         for (i = 0; i < (data * 2); i++) {
8937                 if ((i % 2) == 0)
8938                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8939                                            LED_CTRL_1000MBPS_ON |
8940                                            LED_CTRL_100MBPS_ON |
8941                                            LED_CTRL_10MBPS_ON |
8942                                            LED_CTRL_TRAFFIC_OVERRIDE |
8943                                            LED_CTRL_TRAFFIC_BLINK |
8944                                            LED_CTRL_TRAFFIC_LED);
8945
8946                 else
8947                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8948                                            LED_CTRL_TRAFFIC_OVERRIDE);
8949
8950                 if (msleep_interruptible(500))
8951                         break;
8952         }
8953         tw32(MAC_LED_CTRL, tp->led_ctrl);
8954         return 0;
8955 }
8956
8957 static void tg3_get_ethtool_stats (struct net_device *dev,
8958                                    struct ethtool_stats *estats, u64 *tmp_stats)
8959 {
8960         struct tg3 *tp = netdev_priv(dev);
8961         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8962 }
8963
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Self-test: validate the NVRAM contents.  Three layouts are handled:
 *   - regular images (TG3_EEPROM_MAGIC): CRC over the bootstrap block
 *     (checksum word at 0x10) and the manufacturing block (0x74-0xfb,
 *     checksum word at 0xfc);
 *   - selfboot format-1 images: simple byte checksum over the image
 *     must be zero (rev 2 skips the 4-byte MBA word);
 *   - selfboot "HW" images: per-byte parity check (a data byte plus
 *     its stored parity bit must have odd total weight).
 * Returns 0 when the image checks out, -EIO on corruption or a read
 * failure, -ENOMEM when the temporary buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Derive the number of bytes to validate from the magic word. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can check. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image into buf, one 32-bit word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image's byte checksum sums to zero mod 256. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Bytes 0,
		 * 8, and 16-17 hold packed parity bits; i is advanced
		 * past them inside the loop body so data[] receives only
		 * the payload bytes.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its parity bit must have
		 * odd total weight; any other combination is corruption.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9104
9105 #define TG3_SERDES_TIMEOUT_SEC  2
9106 #define TG3_COPPER_TIMEOUT_SEC  6
9107
9108 static int tg3_test_link(struct tg3 *tp)
9109 {
9110         int i, max;
9111
9112         if (!netif_running(tp->dev))
9113                 return -ENODEV;
9114
9115         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9116                 max = TG3_SERDES_TIMEOUT_SEC;
9117         else
9118                 max = TG3_COPPER_TIMEOUT_SEC;
9119
9120         for (i = 0; i < max; i++) {
9121                 if (netif_carrier_ok(tp->dev))
9122                         return 0;
9123
9124                 if (msleep_interruptible(1000))
9125                         break;
9126         }
9127
9128         return -EIO;
9129 }
9130
/* Self-test: only the commonly used registers are exercised.  For each
 * table entry the register's read-only and read/write bits are probed
 * by writing all-zeros and then all-ones, checking after each write
 * that the read-only bits (read_mask) kept their original value and
 * the read/write bits (write_mask) took exactly what was written.  The
 * original register content is restored afterwards.  Returns 0 on
 * success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry-applicability flags: which chip families an entry covers. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so table entries can be filtered below. */
	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9351
9352 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9353 {
9354         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9355         int i;
9356         u32 j;
9357
9358         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9359                 for (j = 0; j < len; j += 4) {
9360                         u32 val;
9361
9362                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9363                         tg3_read_mem(tp, offset + j, &val);
9364                         if (val != test_pattern[i])
9365                                 return -EIO;
9366                 }
9367         }
9368         return 0;
9369 }
9370
9371 static int tg3_test_memory(struct tg3 *tp)
9372 {
9373         static struct mem_entry {
9374                 u32 offset;
9375                 u32 len;
9376         } mem_tbl_570x[] = {
9377                 { 0x00000000, 0x00b50},
9378                 { 0x00002000, 0x1c000},
9379                 { 0xffffffff, 0x00000}
9380         }, mem_tbl_5705[] = {
9381                 { 0x00000100, 0x0000c},
9382                 { 0x00000200, 0x00008},
9383                 { 0x00004000, 0x00800},
9384                 { 0x00006000, 0x01000},
9385                 { 0x00008000, 0x02000},
9386                 { 0x00010000, 0x0e000},
9387                 { 0xffffffff, 0x00000}
9388         }, mem_tbl_5755[] = {
9389                 { 0x00000200, 0x00008},
9390                 { 0x00004000, 0x00800},
9391                 { 0x00006000, 0x00800},
9392                 { 0x00008000, 0x02000},
9393                 { 0x00010000, 0x0c000},
9394                 { 0xffffffff, 0x00000}
9395         }, mem_tbl_5906[] = {
9396                 { 0x00000200, 0x00008},
9397                 { 0x00004000, 0x00400},
9398                 { 0x00006000, 0x00400},
9399                 { 0x00008000, 0x01000},
9400                 { 0x00010000, 0x01000},
9401                 { 0xffffffff, 0x00000}
9402         };
9403         struct mem_entry *mem_tbl;
9404         int err = 0;
9405         int i;
9406
9407         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9408                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9409                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9410                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9411                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9412                         mem_tbl = mem_tbl_5755;
9413                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9414                         mem_tbl = mem_tbl_5906;
9415                 else
9416                         mem_tbl = mem_tbl_5705;
9417         } else
9418                 mem_tbl = mem_tbl_570x;
9419
9420         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9421                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9422                     mem_tbl[i].len)) != 0)
9423                         break;
9424         }
9425
9426         return err;
9427 }
9428
9429 #define TG3_MAC_LOOPBACK        0
9430 #define TG3_PHY_LOOPBACK        1
9431
9432 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9433 {
9434         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9435         u32 desc_idx;
9436         struct sk_buff *skb, *rx_skb;
9437         u8 *tx_data;
9438         dma_addr_t map;
9439         int num_pkts, tx_len, rx_len, i, err;
9440         struct tg3_rx_buffer_desc *desc;
9441
9442         if (loopback_mode == TG3_MAC_LOOPBACK) {
9443                 /* HW errata - mac loopback fails in some cases on 5780.
9444                  * Normal traffic and PHY loopback are not affected by
9445                  * errata.
9446                  */
9447                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9448                         return 0;
9449
9450                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9451                            MAC_MODE_PORT_INT_LPBACK;
9452                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9453                         mac_mode |= MAC_MODE_LINK_POLARITY;
9454                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9455                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9456                 else
9457                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9458                 tw32(MAC_MODE, mac_mode);
9459         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9460                 u32 val;
9461
9462                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9463                         u32 phytest;
9464
9465                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9466                                 u32 phy;
9467
9468                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9469                                              phytest | MII_TG3_EPHY_SHADOW_EN);
9470                                 if (!tg3_readphy(tp, 0x1b, &phy))
9471                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
9472                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9473                         }
9474                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9475                 } else
9476                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9477
9478                 tg3_phy_toggle_automdix(tp, 0);
9479
9480                 tg3_writephy(tp, MII_BMCR, val);
9481                 udelay(40);
9482
9483                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9484                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9485                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9486                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9487                 } else
9488                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9489
9490                 /* reset to prevent losing 1st rx packet intermittently */
9491                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9492                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9493                         udelay(10);
9494                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9495                 }
9496                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9497                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9498                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9499                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9500                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9501                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9502                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9503                 }
9504                 tw32(MAC_MODE, mac_mode);
9505         }
9506         else
9507                 return -EINVAL;
9508
9509         err = -EIO;
9510
9511         tx_len = 1514;
9512         skb = netdev_alloc_skb(tp->dev, tx_len);
9513         if (!skb)
9514                 return -ENOMEM;
9515
9516         tx_data = skb_put(skb, tx_len);
9517         memcpy(tx_data, tp->dev->dev_addr, 6);
9518         memset(tx_data + 6, 0x0, 8);
9519
9520         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9521
9522         for (i = 14; i < tx_len; i++)
9523                 tx_data[i] = (u8) (i & 0xff);
9524
9525         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9526
9527         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9528              HOSTCC_MODE_NOW);
9529
9530         udelay(10);
9531
9532         rx_start_idx = tp->hw_status->idx[0].rx_producer;
9533
9534         num_pkts = 0;
9535
9536         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9537
9538         tp->tx_prod++;
9539         num_pkts++;
9540
9541         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9542                      tp->tx_prod);
9543         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9544
9545         udelay(10);
9546
9547         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
9548         for (i = 0; i < 25; i++) {
9549                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9550                        HOSTCC_MODE_NOW);
9551
9552                 udelay(10);
9553
9554                 tx_idx = tp->hw_status->idx[0].tx_consumer;
9555                 rx_idx = tp->hw_status->idx[0].rx_producer;
9556                 if ((tx_idx == tp->tx_prod) &&
9557                     (rx_idx == (rx_start_idx + num_pkts)))
9558                         break;
9559         }
9560
9561         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9562         dev_kfree_skb(skb);
9563
9564         if (tx_idx != tp->tx_prod)
9565                 goto out;
9566
9567         if (rx_idx != rx_start_idx + num_pkts)
9568                 goto out;
9569
9570         desc = &tp->rx_rcb[rx_start_idx];
9571         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9572         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9573         if (opaque_key != RXD_OPAQUE_RING_STD)
9574                 goto out;
9575
9576         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9577             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9578                 goto out;
9579
9580         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9581         if (rx_len != tx_len)
9582                 goto out;
9583
9584         rx_skb = tp->rx_std_buffers[desc_idx].skb;
9585
9586         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9587         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9588
9589         for (i = 14; i < tx_len; i++) {
9590                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9591                         goto out;
9592         }
9593         err = 0;
9594
9595         /* tg3_free_rings will unmap and free the rx_skb */
9596 out:
9597         return err;
9598 }
9599
9600 #define TG3_MAC_LOOPBACK_FAILED         1
9601 #define TG3_PHY_LOOPBACK_FAILED         2
9602 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9603                                          TG3_PHY_LOOPBACK_FAILED)
9604
/* Run the MAC loopback test and, for copper devices, the PHY loopback
 * test.  Returns 0 on success or a bitmask of TG3_MAC_LOOPBACK_FAILED /
 * TG3_PHY_LOOPBACK_FAILED.  The device must be up; the hardware is
 * reset before the tests run.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		int i;
		u32 status;

		/* 5784/5761: grab the CPMU hardware mutex before touching
		 * CPMU_CTRL below.
		 */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		/* Restore the saved CPMU_CTRL value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only makes sense when a real PHY is attached. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9660
/* ethtool self-test entry point.  Per-test results land in data[]:
 *   data[0] NVRAM, data[1] link, data[2] registers, data[3] memory,
 *   data[4] loopback (bitmask from tg3_test_loopback), data[5] interrupt.
 * Any failure also sets ETH_TEST_FL_FAILED in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Offline tests need the chip out of low power. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and on-chip CPUs before the intrusive
		 * register/memory tests.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test manages its own locking. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Put the device back into its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9733
9734 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9735 {
9736         struct mii_ioctl_data *data = if_mii(ifr);
9737         struct tg3 *tp = netdev_priv(dev);
9738         int err;
9739
9740         switch(cmd) {
9741         case SIOCGMIIPHY:
9742                 data->phy_id = PHY_ADDR;
9743
9744                 /* fallthru */
9745         case SIOCGMIIREG: {
9746                 u32 mii_regval;
9747
9748                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9749                         break;                  /* We have no PHY */
9750
9751                 if (tp->link_config.phy_is_low_power)
9752                         return -EAGAIN;
9753
9754                 spin_lock_bh(&tp->lock);
9755                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9756                 spin_unlock_bh(&tp->lock);
9757
9758                 data->val_out = mii_regval;
9759
9760                 return err;
9761         }
9762
9763         case SIOCSMIIREG:
9764                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9765                         break;                  /* We have no PHY */
9766
9767                 if (!capable(CAP_NET_ADMIN))
9768                         return -EPERM;
9769
9770                 if (tp->link_config.phy_is_low_power)
9771                         return -EAGAIN;
9772
9773                 spin_lock_bh(&tp->lock);
9774                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9775                 spin_unlock_bh(&tp->lock);
9776
9777                 return err;
9778
9779         default:
9780                 /* do nothing */
9781                 break;
9782         }
9783         return -EOPNOTSUPP;
9784 }
9785
9786 #if TG3_VLAN_TAG_USED
9787 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9788 {
9789         struct tg3 *tp = netdev_priv(dev);
9790
9791         if (netif_running(dev))
9792                 tg3_netif_stop(tp);
9793
9794         tg3_full_lock(tp, 0);
9795
9796         tp->vlgrp = grp;
9797
9798         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9799         __tg3_set_rx_mode(dev);
9800
9801         if (netif_running(dev))
9802                 tg3_netif_start(tp);
9803
9804         tg3_full_unlock(tp);
9805 }
9806 #endif
9807
9808 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9809 {
9810         struct tg3 *tp = netdev_priv(dev);
9811
9812         memcpy(ec, &tp->coal, sizeof(*ec));
9813         return 0;
9814 }
9815
9816 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9817 {
9818         struct tg3 *tp = netdev_priv(dev);
9819         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9820         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9821
9822         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9823                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9824                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9825                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9826                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9827         }
9828
9829         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9830             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9831             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9832             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9833             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9834             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9835             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9836             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9837             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9838             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9839                 return -EINVAL;
9840
9841         /* No rx interrupts will be generated if both are zero */
9842         if ((ec->rx_coalesce_usecs == 0) &&
9843             (ec->rx_max_coalesced_frames == 0))
9844                 return -EINVAL;
9845
9846         /* No tx interrupts will be generated if both are zero */
9847         if ((ec->tx_coalesce_usecs == 0) &&
9848             (ec->tx_max_coalesced_frames == 0))
9849                 return -EINVAL;
9850
9851         /* Only copy relevant parameters, ignore all others. */
9852         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9853         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9854         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9855         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9856         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9857         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9858         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9859         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9860         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9861
9862         if (netif_running(dev)) {
9863                 tg3_full_lock(tp, 0);
9864                 __tg3_set_coalesce(tp, &tp->coal);
9865                 tg3_full_unlock(tp);
9866         }
9867         return 0;
9868 }
9869
/* ethtool operations table for the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9902
9903 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9904 {
9905         u32 cursize, val, magic;
9906
9907         tp->nvram_size = EEPROM_CHIP_SIZE;
9908
9909         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9910                 return;
9911
9912         if ((magic != TG3_EEPROM_MAGIC) &&
9913             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9914             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9915                 return;
9916
9917         /*
9918          * Size the chip by reading offsets at increasing powers of two.
9919          * When we encounter our validation signature, we know the addressing
9920          * has wrapped around, and thus have our chip size.
9921          */
9922         cursize = 0x10;
9923
9924         while (cursize < tp->nvram_size) {
9925                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9926                         return;
9927
9928                 if (val == magic)
9929                         break;
9930
9931                 cursize <<= 1;
9932         }
9933
9934         tp->nvram_size = cursize;
9935 }
9936
9937 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9938 {
9939         u32 val;
9940
9941         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9942                 return;
9943
9944         /* Selfboot format */
9945         if (val != TG3_EEPROM_MAGIC) {
9946                 tg3_get_eeprom_size(tp);
9947                 return;
9948         }
9949
9950         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9951                 if (val != 0) {
9952                         tp->nvram_size = (val >> 16) * 1024;
9953                         return;
9954                 }
9955         }
9956         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
9957 }
9958
9959 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9960 {
9961         u32 nvcfg1;
9962
9963         nvcfg1 = tr32(NVRAM_CFG1);
9964         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9965                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9966         }
9967         else {
9968                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9969                 tw32(NVRAM_CFG1, nvcfg1);
9970         }
9971
9972         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9973             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9974                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9975                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9976                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9977                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9978                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9979                                 break;
9980                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9981                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9982                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9983                                 break;
9984                         case FLASH_VENDOR_ATMEL_EEPROM:
9985                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9986                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9987                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9988                                 break;
9989                         case FLASH_VENDOR_ST:
9990                                 tp->nvram_jedecnum = JEDEC_ST;
9991                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9992                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9993                                 break;
9994                         case FLASH_VENDOR_SAIFUN:
9995                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9996                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9997                                 break;
9998                         case FLASH_VENDOR_SST_SMALL:
9999                         case FLASH_VENDOR_SST_LARGE:
10000                                 tp->nvram_jedecnum = JEDEC_SST;
10001                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10002                                 break;
10003                 }
10004         }
10005         else {
10006                 tp->nvram_jedecnum = JEDEC_ATMEL;
10007                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10008                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10009         }
10010 }
10011
10012 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10013 {
10014         u32 nvcfg1;
10015
10016         nvcfg1 = tr32(NVRAM_CFG1);
10017
10018         /* NVRAM protection for TPM */
10019         if (nvcfg1 & (1 << 27))
10020                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10021
10022         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10023                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10024                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10025                         tp->nvram_jedecnum = JEDEC_ATMEL;
10026                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10027                         break;
10028                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10029                         tp->nvram_jedecnum = JEDEC_ATMEL;
10030                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10031                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10032                         break;
10033                 case FLASH_5752VENDOR_ST_M45PE10:
10034                 case FLASH_5752VENDOR_ST_M45PE20:
10035                 case FLASH_5752VENDOR_ST_M45PE40:
10036                         tp->nvram_jedecnum = JEDEC_ST;
10037                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10038                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10039                         break;
10040         }
10041
10042         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10043                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10044                         case FLASH_5752PAGE_SIZE_256:
10045                                 tp->nvram_pagesize = 256;
10046                                 break;
10047                         case FLASH_5752PAGE_SIZE_512:
10048                                 tp->nvram_pagesize = 512;
10049                                 break;
10050                         case FLASH_5752PAGE_SIZE_1K:
10051                                 tp->nvram_pagesize = 1024;
10052                                 break;
10053                         case FLASH_5752PAGE_SIZE_2K:
10054                                 tp->nvram_pagesize = 2048;
10055                                 break;
10056                         case FLASH_5752PAGE_SIZE_4K:
10057                                 tp->nvram_pagesize = 4096;
10058                                 break;
10059                         case FLASH_5752PAGE_SIZE_264:
10060                                 tp->nvram_pagesize = 264;
10061                                 break;
10062                 }
10063         }
10064         else {
10065                 /* For eeprom, set pagesize to maximum eeprom size */
10066                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10067
10068                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10069                 tw32(NVRAM_CFG1, nvcfg1);
10070         }
10071 }
10072
10073 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10074 {
10075         u32 nvcfg1, protect = 0;
10076
10077         nvcfg1 = tr32(NVRAM_CFG1);
10078
10079         /* NVRAM protection for TPM */
10080         if (nvcfg1 & (1 << 27)) {
10081                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10082                 protect = 1;
10083         }
10084
10085         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10086         switch (nvcfg1) {
10087                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10088                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10089                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10090                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10091                         tp->nvram_jedecnum = JEDEC_ATMEL;
10092                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10093                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10094                         tp->nvram_pagesize = 264;
10095                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10096                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10097                                 tp->nvram_size = (protect ? 0x3e200 :
10098                                                   TG3_NVRAM_SIZE_512KB);
10099                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10100                                 tp->nvram_size = (protect ? 0x1f200 :
10101                                                   TG3_NVRAM_SIZE_256KB);
10102                         else
10103                                 tp->nvram_size = (protect ? 0x1f200 :
10104                                                   TG3_NVRAM_SIZE_128KB);
10105                         break;
10106                 case FLASH_5752VENDOR_ST_M45PE10:
10107                 case FLASH_5752VENDOR_ST_M45PE20:
10108                 case FLASH_5752VENDOR_ST_M45PE40:
10109                         tp->nvram_jedecnum = JEDEC_ST;
10110                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10111                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10112                         tp->nvram_pagesize = 256;
10113                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10114                                 tp->nvram_size = (protect ?
10115                                                   TG3_NVRAM_SIZE_64KB :
10116                                                   TG3_NVRAM_SIZE_128KB);
10117                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10118                                 tp->nvram_size = (protect ?
10119                                                   TG3_NVRAM_SIZE_64KB :
10120                                                   TG3_NVRAM_SIZE_256KB);
10121                         else
10122                                 tp->nvram_size = (protect ?
10123                                                   TG3_NVRAM_SIZE_128KB :
10124                                                   TG3_NVRAM_SIZE_512KB);
10125                         break;
10126         }
10127 }
10128
10129 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10130 {
10131         u32 nvcfg1;
10132
10133         nvcfg1 = tr32(NVRAM_CFG1);
10134
10135         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10136                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10137                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10138                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10139                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10140                         tp->nvram_jedecnum = JEDEC_ATMEL;
10141                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10142                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10143
10144                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10145                         tw32(NVRAM_CFG1, nvcfg1);
10146                         break;
10147                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10148                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10149                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10150                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10151                         tp->nvram_jedecnum = JEDEC_ATMEL;
10152                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10153                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10154                         tp->nvram_pagesize = 264;
10155                         break;
10156                 case FLASH_5752VENDOR_ST_M45PE10:
10157                 case FLASH_5752VENDOR_ST_M45PE20:
10158                 case FLASH_5752VENDOR_ST_M45PE40:
10159                         tp->nvram_jedecnum = JEDEC_ST;
10160                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10161                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10162                         tp->nvram_pagesize = 256;
10163                         break;
10164         }
10165 }
10166
10167 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10168 {
10169         u32 nvcfg1, protect = 0;
10170
10171         nvcfg1 = tr32(NVRAM_CFG1);
10172
10173         /* NVRAM protection for TPM */
10174         if (nvcfg1 & (1 << 27)) {
10175                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10176                 protect = 1;
10177         }
10178
10179         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10180         switch (nvcfg1) {
10181                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10182                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10183                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10184                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10185                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10186                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10187                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10188                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10189                         tp->nvram_jedecnum = JEDEC_ATMEL;
10190                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10191                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10192                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10193                         tp->nvram_pagesize = 256;
10194                         break;
10195                 case FLASH_5761VENDOR_ST_A_M45PE20:
10196                 case FLASH_5761VENDOR_ST_A_M45PE40:
10197                 case FLASH_5761VENDOR_ST_A_M45PE80:
10198                 case FLASH_5761VENDOR_ST_A_M45PE16:
10199                 case FLASH_5761VENDOR_ST_M_M45PE20:
10200                 case FLASH_5761VENDOR_ST_M_M45PE40:
10201                 case FLASH_5761VENDOR_ST_M_M45PE80:
10202                 case FLASH_5761VENDOR_ST_M_M45PE16:
10203                         tp->nvram_jedecnum = JEDEC_ST;
10204                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10205                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10206                         tp->nvram_pagesize = 256;
10207                         break;
10208         }
10209
10210         if (protect) {
10211                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10212         } else {
10213                 switch (nvcfg1) {
10214                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10215                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10216                         case FLASH_5761VENDOR_ST_A_M45PE16:
10217                         case FLASH_5761VENDOR_ST_M_M45PE16:
10218                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10219                                 break;
10220                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10221                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10222                         case FLASH_5761VENDOR_ST_A_M45PE80:
10223                         case FLASH_5761VENDOR_ST_M_M45PE80:
10224                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10225                                 break;
10226                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10227                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10228                         case FLASH_5761VENDOR_ST_A_M45PE40:
10229                         case FLASH_5761VENDOR_ST_M_M45PE40:
10230                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10231                                 break;
10232                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10233                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10234                         case FLASH_5761VENDOR_ST_A_M45PE20:
10235                         case FLASH_5761VENDOR_ST_M_M45PE20:
10236                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10237                                 break;
10238                 }
10239         }
10240 }
10241
10242 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10243 {
10244         tp->nvram_jedecnum = JEDEC_ATMEL;
10245         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10246         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10247 }
10248
10249 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10250 static void __devinit tg3_nvram_init(struct tg3 *tp)
10251 {
10252         tw32_f(GRC_EEPROM_ADDR,
10253              (EEPROM_ADDR_FSM_RESET |
10254               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10255                EEPROM_ADDR_CLKPERD_SHIFT)));
10256
10257         msleep(1);
10258
10259         /* Enable seeprom accesses. */
10260         tw32_f(GRC_LOCAL_CTRL,
10261              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10262         udelay(100);
10263
10264         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10265             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10266                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10267
10268                 if (tg3_nvram_lock(tp)) {
10269                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10270                                "tg3_nvram_init failed.\n", tp->dev->name);
10271                         return;
10272                 }
10273                 tg3_enable_nvram_access(tp);
10274
10275                 tp->nvram_size = 0;
10276
10277                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10278                         tg3_get_5752_nvram_info(tp);
10279                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10280                         tg3_get_5755_nvram_info(tp);
10281                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10282                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10283                         tg3_get_5787_nvram_info(tp);
10284                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10285                         tg3_get_5761_nvram_info(tp);
10286                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10287                         tg3_get_5906_nvram_info(tp);
10288                 else
10289                         tg3_get_nvram_info(tp);
10290
10291                 if (tp->nvram_size == 0)
10292                         tg3_get_nvram_size(tp);
10293
10294                 tg3_disable_nvram_access(tp);
10295                 tg3_nvram_unlock(tp);
10296
10297         } else {
10298                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10299
10300                 tg3_get_eeprom_size(tp);
10301         }
10302 }
10303
/* Read one dword-aligned 32-bit word from the seeprom through the GRC
 * EEPROM state machine.  Returns 0 on success, -EINVAL for a bad
 * offset, or -EBUSY if the state machine never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	/* Offset must fit in the address field and be dword aligned. */
	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the other GRC_EEPROM_ADDR bits; clear address,
	 * device id, and the read/write direction bit.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Program the address and kick off the read. */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second (1000 x 1 ms). */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10337
10338 #define NVRAM_CMD_TIMEOUT 10000
10339
10340 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10341 {
10342         int i;
10343
10344         tw32(NVRAM_CMD, nvram_cmd);
10345         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10346                 udelay(10);
10347                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10348                         udelay(10);
10349                         break;
10350                 }
10351         }
10352         if (i == NVRAM_CMD_TIMEOUT) {
10353                 return -EBUSY;
10354         }
10355         return 0;
10356 }
10357
10358 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10359 {
10360         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10361             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10362             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10363            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10364             (tp->nvram_jedecnum == JEDEC_ATMEL))
10365
10366                 addr = ((addr / tp->nvram_pagesize) <<
10367                         ATMEL_AT45DB0X1B_PAGE_POS) +
10368                        (addr % tp->nvram_pagesize);
10369
10370         return addr;
10371 }
10372
10373 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10374 {
10375         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10376             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10377             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10378            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10379             (tp->nvram_jedecnum == JEDEC_ATMEL))
10380
10381                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10382                         tp->nvram_pagesize) +
10383                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10384
10385         return addr;
10386 }
10387
/* Read one 32-bit word of NVRAM at @offset into @val.
 * Takes and releases the NVRAM hardware lock.  Returns 0 on success
 * or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Chips without an NVRAM interface (5700/5701) keep their
	 * configuration in a seeprom behind the GRC EEPROM registers.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's page-based addressing, if any. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* The data register holds the word byte-swapped. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10419
10420 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10421 {
10422         u32 v;
10423         int res = tg3_nvram_read(tp, offset, &v);
10424         if (!res)
10425                 *val = cpu_to_le32(v);
10426         return res;
10427 }
10428
10429 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10430 {
10431         int err;
10432         u32 tmp;
10433
10434         err = tg3_nvram_read(tp, offset, &tmp);
10435         *val = swab32(tmp);
10436         return err;
10437 }
10438
/* Write @len bytes at @offset through the GRC EEPROM state machine,
 * one 32-bit word at a time.  Returns 0 on success or -EBUSY if a
 * write never signals completion.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* The data register expects CPU byte order. */
		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* NOTE(review): writing COMPLETE back first — presumably
		 * acknowledges a prior completion before starting; confirm
		 * against the hardware manual.
		 */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Clear address/devid/direction, then program the new
		 * address and start a write.
		 */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10481
/* offset and length are dword aligned.
 *
 * Write to an unbuffered flash part: every page touched must be read
 * back in full, erased, and rewritten.  Caller holds the NVRAM lock
 * (see tg3_nvram_write_block()).
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full page for read-modify-write. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing this offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image.
		 * NOTE(review): buf is never advanced across loop
		 * iterations, so a write spanning multiple pages would
		 * rewrite the first chunk each time — verify callers
		 * never span a page boundary, or fix separately.
		 */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back one dword at a time,
		 * flagging the first and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Leave the flash write-disabled on the way out. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10578
/* offset and length are dword aligned.
 *
 * Write to a buffered part (or plain eeprom) one 32-bit word at a
 * time; no page erase is needed.  Caller holds the NVRAM lock.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first word of a page (or of the transfer) and
		 * the last word of a page or of the whole transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older chips with ST parts need an explicit write-enable
		 * command before each page write begins.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10632
10633 /* offset and length are dword aligned */
10634 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10635 {
10636         int ret;
10637
10638         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10639                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10640                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10641                 udelay(40);
10642         }
10643
10644         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10645                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10646         }
10647         else {
10648                 u32 grc_mode;
10649
10650                 ret = tg3_nvram_lock(tp);
10651                 if (ret)
10652                         return ret;
10653
10654                 tg3_enable_nvram_access(tp);
10655                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10656                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10657                         tw32(NVRAM_WRITE1, 0x406);
10658
10659                 grc_mode = tr32(GRC_MODE);
10660                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10661
10662                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10663                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10664
10665                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10666                                 buf);
10667                 }
10668                 else {
10669                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10670                                 buf);
10671                 }
10672
10673                 grc_mode = tr32(GRC_MODE);
10674                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10675
10676                 tg3_disable_nvram_access(tp);
10677                 tg3_nvram_unlock(tp);
10678         }
10679
10680         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10681                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10682                 udelay(40);
10683         }
10684
10685         return ret;
10686 }
10687
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY ID
 * present on that board.  A phy_id of 0 marks a serdes (fiber) board
 * (see the !tp->phy_id check in tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10692
/* Fallback PHY ID table, consulted via lookup_by_subsys() when the
 * NVRAM carries no valid signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10730
10731 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10732 {
10733         int i;
10734
10735         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10736                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10737                      tp->pdev->subsystem_vendor) &&
10738                     (subsys_id_to_phy_id[i].subsys_devid ==
10739                      tp->pdev->subsystem_device))
10740                         return &subsys_id_to_phy_id[i];
10741         }
10742         return NULL;
10743 }
10744
10745 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10746 {
10747         u32 val;
10748         u16 pmcsr;
10749
10750         /* On some early chips the SRAM cannot be accessed in D3hot state,
10751          * so need make sure we're in D0.
10752          */
10753         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10754         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10755         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10756         msleep(1);
10757
10758         /* Make sure register accesses (indirect or otherwise)
10759          * will function correctly.
10760          */
10761         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10762                                tp->misc_host_ctrl);
10763
10764         /* The memory arbiter has to be enabled in order for SRAM accesses
10765          * to succeed.  Normally on powerup the tg3 chip firmware will make
10766          * sure it is enabled, but other entities such as system netboot
10767          * code might disable it.
10768          */
10769         val = tr32(MEMARB_MODE);
10770         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10771
10772         tp->phy_id = PHY_ID_INVALID;
10773         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10774
10775         /* Assume an onboard device and WOL capable by default.  */
10776         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10777
10778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10779                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10780                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10781                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10782                 }
10783                 val = tr32(VCPU_CFGSHDW);
10784                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10785                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10786                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10787                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10788                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10789                 return;
10790         }
10791
10792         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10793         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10794                 u32 nic_cfg, led_cfg;
10795                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10796                 int eeprom_phy_serdes = 0;
10797
10798                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10799                 tp->nic_sram_data_cfg = nic_cfg;
10800
10801                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10802                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10803                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10804                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10805                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10806                     (ver > 0) && (ver < 0x100))
10807                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10808
10809                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10810                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10811                         eeprom_phy_serdes = 1;
10812
10813                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10814                 if (nic_phy_id != 0) {
10815                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10816                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10817
10818                         eeprom_phy_id  = (id1 >> 16) << 10;
10819                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10820                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10821                 } else
10822                         eeprom_phy_id = 0;
10823
10824                 tp->phy_id = eeprom_phy_id;
10825                 if (eeprom_phy_serdes) {
10826                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10827                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10828                         else
10829                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10830                 }
10831
10832                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10833                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10834                                     SHASTA_EXT_LED_MODE_MASK);
10835                 else
10836                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10837
10838                 switch (led_cfg) {
10839                 default:
10840                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10841                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10842                         break;
10843
10844                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10845                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10846                         break;
10847
10848                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10849                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10850
10851                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10852                          * read on some older 5700/5701 bootcode.
10853                          */
10854                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10855                             ASIC_REV_5700 ||
10856                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10857                             ASIC_REV_5701)
10858                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10859
10860                         break;
10861
10862                 case SHASTA_EXT_LED_SHARED:
10863                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10864                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10865                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10866                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10867                                                  LED_CTRL_MODE_PHY_2);
10868                         break;
10869
10870                 case SHASTA_EXT_LED_MAC:
10871                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10872                         break;
10873
10874                 case SHASTA_EXT_LED_COMBO:
10875                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10876                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10877                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10878                                                  LED_CTRL_MODE_PHY_2);
10879                         break;
10880
10881                 };
10882
10883                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10884                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10885                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10886                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10887
10888                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10889                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10890
10891                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10892                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10893                         if ((tp->pdev->subsystem_vendor ==
10894                              PCI_VENDOR_ID_ARIMA) &&
10895                             (tp->pdev->subsystem_device == 0x205a ||
10896                              tp->pdev->subsystem_device == 0x2063))
10897                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10898                 } else {
10899                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10900                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10901                 }
10902
10903                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10904                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10905                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10906                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10907                 }
10908                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10909                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10910                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10911                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10912                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10913
10914                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10915                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10916                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10917
10918                 if (cfg2 & (1 << 17))
10919                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10920
10921                 /* serdes signal pre-emphasis in register 0x590 set by */
10922                 /* bootcode if bit 18 is set */
10923                 if (cfg2 & (1 << 18))
10924                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10925
10926                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10927                         u32 cfg3;
10928
10929                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10930                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10931                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10932                 }
10933         }
10934 }
10935
10936 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10937 {
10938         int i;
10939         u32 val;
10940
10941         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10942         tw32(OTP_CTRL, cmd);
10943
10944         /* Wait for up to 1 ms for command to execute. */
10945         for (i = 0; i < 100; i++) {
10946                 val = tr32(OTP_STATUS);
10947                 if (val & OTP_STATUS_CMD_DONE)
10948                         break;
10949                 udelay(10);
10950         }
10951
10952         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10953 }
10954
10955 /* Read the gphy configuration from the OTP region of the chip.  The gphy
10956  * configuration is a 32-bit value that straddles the alignment boundary.
10957  * We do two 32-bit reads and then shift and merge the results.
10958  */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	/* A failed OTP command yields 0, which callers must treat as
	 * "no valid gphy OTP data".
	 */
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* Read the word containing the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Read the adjacent word holding the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
10984
/* Identify the PHY attached to the chip and record its ID in
 * tp->phy_id.  Falls back to the ID found during EEPROM parsing, or to
 * the hard-coded subsystem table, when the hardware ID is unreadable or
 * unknown.  Unless ASF/APE management firmware owns the PHY, also
 * resets a copper PHY and restarts autonegotiation with a full
 * advertisement.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* The BCM8002 is a SerDes device; any other known ID
		 * found here is treated as copper.
		 */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Only reset/reconfigure a copper PHY that no management
	 * firmware (ASF/APE) is using.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is read twice (MII latched-low
		 * semantics) so the second read reflects the current
		 * state; if the link is already up, leave the PHY alone.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		/* Advertise all 10/100 modes plus pause capability. */
		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 are forced to be link master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* If anything less than the full set is currently being
		 * advertised, rewrite the advertisement registers and
		 * restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): the advertisement registers are rewritten
		 * unconditionally here after the conditional write above --
		 * looks redundant but matches long-standing behavior;
		 * verify before changing.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	/* The 5401 PHY needs extra DSP setup after probe/reset. */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): the 5401 DSP init is invoked a second time here
	 * (err is 0 if we reached this point through the block above);
	 * presumably deliberate -- confirm before simplifying.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* SerDes parts advertise fiber gigabit only; 10/100-only parts
	 * must not advertise gigabit at all.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11112
/* Extract the board part number from the PCI VPD (Vital Product Data)
 * area into tp->board_part_number.  The 256-byte VPD is obtained
 * either directly from NVRAM (when it carries the tg3 EEPROM magic) or
 * through the PCI VPD capability registers, then parsed for the "PN"
 * keyword in the read-only (0x90) resource.  On any failure a fixed
 * fallback string is stored instead.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy 256 bytes,
		 * unpacking each 32-bit word into individual bytes.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No tg3 magic: read the VPD through the PCI capability.
		 * NOTE(review): pci_find_capability() can return 0 when
		 * the capability is absent; the result is used unchecked
		 * -- verify all supported devices expose VPD.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			/* Start a 4-byte VPD read at offset i, then poll
			 * the completion flag (bit 15 of PCI_VPD_ADDR) for
			 * up to ~100ms.
			 */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 = identifier-string tag, 0x91 = read/write data
		 * tag: skip them (3-byte header plus 16-bit little-endian
		 * length).
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than the read-only data tag (0x90) here
		 * means the VPD is malformed.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries inside the read-only block
		 * looking for "PN" (part number).
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				/* NOTE(review): no NUL terminator is added
				 * after the copy; assumes board_part_number
				 * is pre-zeroed and larger than partno_len
				 * -- confirm against struct tg3.
				 */
				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			/* Skip this keyword: 3-byte header + data. */
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11213
11214 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11215 {
11216         u32 val;
11217
11218         if (tg3_nvram_read_swab(tp, offset, &val) ||
11219             (val & 0xfc000000) != 0x0c000000 ||
11220             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11221             val != 0)
11222                 return 0;
11223
11224         return 1;
11225 }
11226
/* Build the firmware version string in tp->fw_ver from NVRAM.  The
 * 16-byte bootcode version is read first; when ASF is enabled and the
 * APE is not managing the device, the ASF initialization firmware
 * version is appended after ", ".  Returns silently on any NVRAM read
 * failure or unrecognized image.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Only NVRAM images carrying the tg3 EEPROM magic are parsed. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the bootcode image offset, word 0x4 its load
	 * (start) address.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image load address; convert it
	 * to an NVRAM offset and copy the 16-byte version string.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* The ASF firmware version is only appended when ASF is active
	 * and the APE is not present.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load address; later parts read it
	 * from the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Resolve the ASF image offset, validate it, and locate its
	 * version word the same way as the bootcode above.
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " plus the ASF version after the bootcode version.
	 * NOTE(review): the two separator bytes are stored without a
	 * bounds check; assumes the 16-byte bootcode string always
	 * leaves room in fw_ver -- confirm TG3_VER_SIZE in tg3.h.
	 */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Truncate the final word if it would overrun fw_ver. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Always NUL-terminate. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11310
11311 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11312
11313 static int __devinit tg3_get_invariants(struct tg3 *tp)
11314 {
11315         static struct pci_device_id write_reorder_chipsets[] = {
11316                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11317                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11318                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11319                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11320                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11321                              PCI_DEVICE_ID_VIA_8385_0) },
11322                 { },
11323         };
11324         u32 misc_ctrl_reg;
11325         u32 cacheline_sz_reg;
11326         u32 pci_state_reg, grc_misc_cfg;
11327         u32 val;
11328         u16 pci_cmd;
11329         int err, pcie_cap;
11330
11331         /* Force memory write invalidate off.  If we leave it on,
11332          * then on 5700_BX chips we have to enable a workaround.
11333          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11334          * to match the cacheline size.  The Broadcom driver have this
11335          * workaround but turns MWI off all the times so never uses
11336          * it.  This seems to suggest that the workaround is insufficient.
11337          */
11338         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11339         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11340         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11341
11342         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11343          * has the register indirect write enable bit set before
11344          * we try to access any of the MMIO registers.  It is also
11345          * critical that the PCI-X hw workaround situation is decided
11346          * before that as well.
11347          */
11348         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11349                               &misc_ctrl_reg);
11350
11351         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11352                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11353         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11354                 u32 prod_id_asic_rev;
11355
11356                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11357                                       &prod_id_asic_rev);
11358                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11359         }
11360
11361         /* Wrong chip ID in 5752 A0. This code can be removed later
11362          * as A0 is not in production.
11363          */
11364         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11365                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11366
11367         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11368          * we need to disable memory and use config. cycles
11369          * only to access all registers. The 5702/03 chips
11370          * can mistakenly decode the special cycles from the
11371          * ICH chipsets as memory write cycles, causing corruption
11372          * of register and memory space. Only certain ICH bridges
11373          * will drive special cycles with non-zero data during the
11374          * address phase which can fall within the 5703's address
11375          * range. This is not an ICH bug as the PCI spec allows
11376          * non-zero address during special cycles. However, only
11377          * these ICH bridges are known to drive non-zero addresses
11378          * during special cycles.
11379          *
11380          * Since special cycles do not cross PCI bridges, we only
11381          * enable this workaround if the 5703 is on the secondary
11382          * bus of these ICH bridges.
11383          */
11384         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11385             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11386                 static struct tg3_dev_id {
11387                         u32     vendor;
11388                         u32     device;
11389                         u32     rev;
11390                 } ich_chipsets[] = {
11391                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11392                           PCI_ANY_ID },
11393                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11394                           PCI_ANY_ID },
11395                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11396                           0xa },
11397                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11398                           PCI_ANY_ID },
11399                         { },
11400                 };
11401                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11402                 struct pci_dev *bridge = NULL;
11403
11404                 while (pci_id->vendor != 0) {
11405                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11406                                                 bridge);
11407                         if (!bridge) {
11408                                 pci_id++;
11409                                 continue;
11410                         }
11411                         if (pci_id->rev != PCI_ANY_ID) {
11412                                 if (bridge->revision > pci_id->rev)
11413                                         continue;
11414                         }
11415                         if (bridge->subordinate &&
11416                             (bridge->subordinate->number ==
11417                              tp->pdev->bus->number)) {
11418
11419                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11420                                 pci_dev_put(bridge);
11421                                 break;
11422                         }
11423                 }
11424         }
11425
11426         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11427                 static struct tg3_dev_id {
11428                         u32     vendor;
11429                         u32     device;
11430                 } bridge_chipsets[] = {
11431                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11432                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11433                         { },
11434                 };
11435                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11436                 struct pci_dev *bridge = NULL;
11437
11438                 while (pci_id->vendor != 0) {
11439                         bridge = pci_get_device(pci_id->vendor,
11440                                                 pci_id->device,
11441                                                 bridge);
11442                         if (!bridge) {
11443                                 pci_id++;
11444                                 continue;
11445                         }
11446                         if (bridge->subordinate &&
11447                             (bridge->subordinate->number <=
11448                              tp->pdev->bus->number) &&
11449                             (bridge->subordinate->subordinate >=
11450                              tp->pdev->bus->number)) {
11451                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11452                                 pci_dev_put(bridge);
11453                                 break;
11454                         }
11455                 }
11456         }
11457
11458         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11459          * DMA addresses > 40-bit. This bridge may have other additional
11460          * 57xx devices behind it in some 4-port NIC designs for example.
11461          * Any tg3 device found behind the bridge will also need the 40-bit
11462          * DMA workaround.
11463          */
11464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11465             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11466                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11467                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11468                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11469         }
11470         else {
11471                 struct pci_dev *bridge = NULL;
11472
11473                 do {
11474                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11475                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11476                                                 bridge);
11477                         if (bridge && bridge->subordinate &&
11478                             (bridge->subordinate->number <=
11479                              tp->pdev->bus->number) &&
11480                             (bridge->subordinate->subordinate >=
11481                              tp->pdev->bus->number)) {
11482                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11483                                 pci_dev_put(bridge);
11484                                 break;
11485                         }
11486                 } while (bridge);
11487         }
11488
11489         /* Initialize misc host control in PCI block. */
11490         tp->misc_host_ctrl |= (misc_ctrl_reg &
11491                                MISC_HOST_CTRL_CHIPREV);
11492         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11493                                tp->misc_host_ctrl);
11494
11495         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11496                               &cacheline_sz_reg);
11497
11498         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11499         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11500         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11501         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11502
11503         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11504             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11505                 tp->pdev_peer = tg3_find_peer(tp);
11506
11507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11509             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11513             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11514             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11515                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11516
11517         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11518             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11519                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11520
11521         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11522                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11523                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11524                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11525                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11526                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11527                      tp->pdev_peer == tp->pdev))
11528                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11529
11530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11531                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11532                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11533                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11534                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11535                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11536                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11537                 } else {
11538                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11539                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11540                                 ASIC_REV_5750 &&
11541                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11542                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11543                 }
11544         }
11545
11546         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11547             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11548             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11549             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11550             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11551             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11552             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11553             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11554                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11555
11556         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11557         if (pcie_cap != 0) {
11558                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11559
11560                 pcie_set_readrq(tp->pdev, 4096);
11561
11562                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11563                         u16 lnkctl;
11564
11565                         pci_read_config_word(tp->pdev,
11566                                              pcie_cap + PCI_EXP_LNKCTL,
11567                                              &lnkctl);
11568                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11569                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11570                 }
11571         }
11572
11573         /* If we have an AMD 762 or VIA K8T800 chipset, write
11574          * reordering to the mailbox registers done by the host
11575          * controller can cause major troubles.  We read back from
11576          * every mailbox register write to force the writes to be
11577          * posted to the chip in order.
11578          */
11579         if (pci_dev_present(write_reorder_chipsets) &&
11580             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11581                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11582
11583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11584             tp->pci_lat_timer < 64) {
11585                 tp->pci_lat_timer = 64;
11586
11587                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11588                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11589                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11590                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11591
11592                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11593                                        cacheline_sz_reg);
11594         }
11595
11596         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11597             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11598                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11599                 if (!tp->pcix_cap) {
11600                         printk(KERN_ERR PFX "Cannot find PCI-X "
11601                                             "capability, aborting.\n");
11602                         return -EIO;
11603                 }
11604         }
11605
11606         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11607                               &pci_state_reg);
11608
11609         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11610                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11611
11612                 /* If this is a 5700 BX chipset, and we are in PCI-X
11613                  * mode, enable register write workaround.
11614                  *
11615                  * The workaround is to use indirect register accesses
11616                  * for all chip writes not to mailbox registers.
11617                  */
11618                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11619                         u32 pm_reg;
11620
11621                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11622
11623                         /* The chip can have it's power management PCI config
11624                          * space registers clobbered due to this bug.
11625                          * So explicitly force the chip into D0 here.
11626                          */
11627                         pci_read_config_dword(tp->pdev,
11628                                               tp->pm_cap + PCI_PM_CTRL,
11629                                               &pm_reg);
11630                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11631                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11632                         pci_write_config_dword(tp->pdev,
11633                                                tp->pm_cap + PCI_PM_CTRL,
11634                                                pm_reg);
11635
11636                         /* Also, force SERR#/PERR# in PCI command. */
11637                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11638                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11639                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11640                 }
11641         }
11642
11643         /* 5700 BX chips need to have their TX producer index mailboxes
11644          * written twice to workaround a bug.
11645          */
11646         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11647                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11648
11649         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11650                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11651         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11652                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11653
11654         /* Chip-specific fixup from Broadcom driver */
11655         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11656             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11657                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11658                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11659         }
11660
11661         /* Default fast path register access methods */
11662         tp->read32 = tg3_read32;
11663         tp->write32 = tg3_write32;
11664         tp->read32_mbox = tg3_read32;
11665         tp->write32_mbox = tg3_write32;
11666         tp->write32_tx_mbox = tg3_write32;
11667         tp->write32_rx_mbox = tg3_write32;
11668
11669         /* Various workaround register access methods */
11670         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11671                 tp->write32 = tg3_write_indirect_reg32;
11672         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11673                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11674                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11675                 /*
11676                  * Back to back register writes can cause problems on these
11677                  * chips, the workaround is to read back all reg writes
11678                  * except those to mailbox regs.
11679                  *
11680                  * See tg3_write_indirect_reg32().
11681                  */
11682                 tp->write32 = tg3_write_flush_reg32;
11683         }
11684
11685
11686         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11687             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11688                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11689                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11690                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11691         }
11692
11693         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11694                 tp->read32 = tg3_read_indirect_reg32;
11695                 tp->write32 = tg3_write_indirect_reg32;
11696                 tp->read32_mbox = tg3_read_indirect_mbox;
11697                 tp->write32_mbox = tg3_write_indirect_mbox;
11698                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11699                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11700
11701                 iounmap(tp->regs);
11702                 tp->regs = NULL;
11703
11704                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11705                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11706                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11707         }
11708         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11709                 tp->read32_mbox = tg3_read32_mbox_5906;
11710                 tp->write32_mbox = tg3_write32_mbox_5906;
11711                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11712                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11713         }
11714
11715         if (tp->write32 == tg3_write_indirect_reg32 ||
11716             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11717              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11718               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11719                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11720
11721         /* Get eeprom hw config before calling tg3_set_power_state().
11722          * In particular, the TG3_FLG2_IS_NIC flag must be
11723          * determined before calling tg3_set_power_state() so that
11724          * we know whether or not to switch out of Vaux power.
11725          * When the flag is set, it means that GPIO1 is used for eeprom
11726          * write protect and also implies that it is a LOM where GPIOs
11727          * are not used to switch power.
11728          */
11729         tg3_get_eeprom_hw_cfg(tp);
11730
11731         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11732                 /* Allow reads and writes to the
11733                  * APE register and memory space.
11734                  */
11735                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11736                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11737                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11738                                        pci_state_reg);
11739         }
11740
11741         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11742             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11743                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11744
11745                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11746                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11747                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11748                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11749                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11750         }
11751
11752         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11753          * GPIO1 driven high will bring 5700's external PHY out of reset.
11754          * It is also used as eeprom write protect on LOMs.
11755          */
11756         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11757         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11758             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11759                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11760                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11761         /* Unused GPIO3 must be driven as output on 5752 because there
11762          * are no pull-up resistors on unused GPIO pins.
11763          */
11764         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11765                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11766
11767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11768                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11769
11770         /* Force the chip into D0. */
11771         err = tg3_set_power_state(tp, PCI_D0);
11772         if (err) {
11773                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11774                        pci_name(tp->pdev));
11775                 return err;
11776         }
11777
11778         /* 5700 B0 chips do not support checksumming correctly due
11779          * to hardware bugs.
11780          */
11781         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11782                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11783
11784         /* Derive initial jumbo mode from MTU assigned in
11785          * ether_setup() via the alloc_etherdev() call
11786          */
11787         if (tp->dev->mtu > ETH_DATA_LEN &&
11788             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11789                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11790
11791         /* Determine WakeOnLan speed to use. */
11792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11793             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11794             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11795             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11796                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11797         } else {
11798                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11799         }
11800
11801         /* A few boards don't want Ethernet@WireSpeed phy feature */
11802         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11803             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11804              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11805              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11806             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11807             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11808                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11809
11810         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11811             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11812                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11813         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11814                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11815
11816         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11817                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11818                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11819                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11820                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11821                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11822                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11823                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11824                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11825                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11826                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11827                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11828         }
11829
11830         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11831             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11832                 tp->phy_otp = tg3_read_otp_phycfg(tp);
11833                 if (tp->phy_otp == 0)
11834                         tp->phy_otp = TG3_OTP_DEFAULT;
11835         }
11836
11837         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11838             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11839                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11840         else
11841                 tp->mi_mode = MAC_MI_MODE_BASE;
11842
11843         tp->coalesce_mode = 0;
11844         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11845             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11846                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11847
11848         /* Initialize MAC MI mode, polling disabled. */
11849         tw32_f(MAC_MI_MODE, tp->mi_mode);
11850         udelay(80);
11851
11852         /* Initialize data/descriptor byte/word swapping. */
11853         val = tr32(GRC_MODE);
11854         val &= GRC_MODE_HOST_STACKUP;
11855         tw32(GRC_MODE, val | tp->grc_mode);
11856
11857         tg3_switch_clocks(tp);
11858
11859         /* Clear this out for sanity. */
11860         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11861
11862         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11863                               &pci_state_reg);
11864         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11865             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11866                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11867
11868                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11869                     chiprevid == CHIPREV_ID_5701_B0 ||
11870                     chiprevid == CHIPREV_ID_5701_B2 ||
11871                     chiprevid == CHIPREV_ID_5701_B5) {
11872                         void __iomem *sram_base;
11873
11874                         /* Write some dummy words into the SRAM status block
11875                          * area, see if it reads back correctly.  If the return
11876                          * value is bad, force enable the PCIX workaround.
11877                          */
11878                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11879
11880                         writel(0x00000000, sram_base);
11881                         writel(0x00000000, sram_base + 4);
11882                         writel(0xffffffff, sram_base + 4);
11883                         if (readl(sram_base) != 0x00000000)
11884                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11885                 }
11886         }
11887
11888         udelay(50);
11889         tg3_nvram_init(tp);
11890
11891         grc_misc_cfg = tr32(GRC_MISC_CFG);
11892         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11893
11894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11895             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11896              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11897                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11898
11899         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11900             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11901                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11902         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11903                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11904                                       HOSTCC_MODE_CLRTICK_TXBD);
11905
11906                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11907                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11908                                        tp->misc_host_ctrl);
11909         }
11910
11911         /* these are limited to 10/100 only */
11912         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11913              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11914             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11915              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11916              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11917               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11918               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11919             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11920              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11921               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11922               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11924                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11925
11926         err = tg3_phy_probe(tp);
11927         if (err) {
11928                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11929                        pci_name(tp->pdev), err);
11930                 /* ... but do not return immediately ... */
11931         }
11932
11933         tg3_read_partno(tp);
11934         tg3_read_fw_ver(tp);
11935
11936         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11937                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11938         } else {
11939                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11940                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11941                 else
11942                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11943         }
11944
11945         /* 5700 {AX,BX} chips have a broken status block link
11946          * change bit implementation, so we must use the
11947          * status register in those cases.
11948          */
11949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11950                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11951         else
11952                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11953
11954         /* The led_ctrl is set during tg3_phy_probe, here we might
11955          * have to force the link status polling mechanism based
11956          * upon subsystem IDs.
11957          */
11958         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11960             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11961                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11962                                   TG3_FLAG_USE_LINKCHG_REG);
11963         }
11964
11965         /* For all SERDES we poll the MAC status register. */
11966         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11967                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11968         else
11969                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11970
11971         /* All chips before 5787 can get confused if TX buffers
11972          * straddle the 4GB address boundary in some cases.
11973          */
11974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11979                 tp->dev->hard_start_xmit = tg3_start_xmit;
11980         else
11981                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11982
11983         tp->rx_offset = 2;
11984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11985             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11986                 tp->rx_offset = 0;
11987
11988         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11989
11990         /* Increment the rx prod index on the rx std ring by at most
11991          * 8 for these chips to workaround hw errata.
11992          */
11993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11995             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11996                 tp->rx_std_max_post = 8;
11997
11998         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11999                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12000                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12001
12002         return err;
12003 }
12004
12005 #ifdef CONFIG_SPARC
12006 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12007 {
12008         struct net_device *dev = tp->dev;
12009         struct pci_dev *pdev = tp->pdev;
12010         struct device_node *dp = pci_device_to_OF_node(pdev);
12011         const unsigned char *addr;
12012         int len;
12013
12014         addr = of_get_property(dp, "local-mac-address", &len);
12015         if (addr && len == 6) {
12016                 memcpy(dev->dev_addr, addr, 6);
12017                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12018                 return 0;
12019         }
12020         return -ENODEV;
12021 }
12022
12023 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12024 {
12025         struct net_device *dev = tp->dev;
12026
12027         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12028         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12029         return 0;
12030 }
12031 #endif
12032
/* Determine the device's MAC address, trying sources in decreasing
 * order of preference:
 *
 *   1. OpenFirmware "local-mac-address" property (SPARC only).
 *   2. The MAC address mailbox in NIC SRAM (filled in by bootcode).
 *   3. NVRAM, at a chip-dependent offset.
 *   4. The MAC_ADDR_0_{HIGH,LOW} hardware registers.
 *   5. The system IDPROM (SPARC-only last resort).
 *
 * On success the address is stored in dev->dev_addr and mirrored to
 * dev->perm_addr.  Returns 0 on success, -EINVAL if no valid
 * (non-zero, non-multicast) address could be found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC chips store the second function's address at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): on lock failure the NVRAM state machine is
		 * reset instead of unlocked -- presumably to recover a
		 * wedged arbiter; confirm against Broadcom docs.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* Upper 16 bits must read 0x484b (ASCII "HK") for the mailbox
	 * contents to be considered valid.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte ordering differs from
		 * the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12107
12108 #define BOUNDARY_SINGLE_CACHELINE       1
12109 #define BOUNDARY_MULTI_CACHELINE        2
12110
12111 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12112 {
12113         int cacheline_size;
12114         u8 byte;
12115         int goal;
12116
12117         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12118         if (byte == 0)
12119                 cacheline_size = 1024;
12120         else
12121                 cacheline_size = (int) byte * 4;
12122
12123         /* On 5703 and later chips, the boundary bits have no
12124          * effect.
12125          */
12126         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12127             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12128             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12129                 goto out;
12130
12131 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12132         goal = BOUNDARY_MULTI_CACHELINE;
12133 #else
12134 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12135         goal = BOUNDARY_SINGLE_CACHELINE;
12136 #else
12137         goal = 0;
12138 #endif
12139 #endif
12140
12141         if (!goal)
12142                 goto out;
12143
12144         /* PCI controllers on most RISC systems tend to disconnect
12145          * when a device tries to burst across a cache-line boundary.
12146          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12147          *
12148          * Unfortunately, for PCI-E there are only limited
12149          * write-side controls for this, and thus for reads
12150          * we will still get the disconnects.  We'll also waste
12151          * these PCI cycles for both read and write for chips
12152          * other than 5700 and 5701 which do not implement the
12153          * boundary bits.
12154          */
12155         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12156             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12157                 switch (cacheline_size) {
12158                 case 16:
12159                 case 32:
12160                 case 64:
12161                 case 128:
12162                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12163                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12164                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12165                         } else {
12166                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12167                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12168                         }
12169                         break;
12170
12171                 case 256:
12172                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12173                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12174                         break;
12175
12176                 default:
12177                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12178                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12179                         break;
12180                 };
12181         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12182                 switch (cacheline_size) {
12183                 case 16:
12184                 case 32:
12185                 case 64:
12186                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12187                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12188                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12189                                 break;
12190                         }
12191                         /* fallthrough */
12192                 case 128:
12193                 default:
12194                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12195                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12196                         break;
12197                 };
12198         } else {
12199                 switch (cacheline_size) {
12200                 case 16:
12201                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12202                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12203                                         DMA_RWCTRL_WRITE_BNDRY_16);
12204                                 break;
12205                         }
12206                         /* fallthrough */
12207                 case 32:
12208                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12209                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12210                                         DMA_RWCTRL_WRITE_BNDRY_32);
12211                                 break;
12212                         }
12213                         /* fallthrough */
12214                 case 64:
12215                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12216                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12217                                         DMA_RWCTRL_WRITE_BNDRY_64);
12218                                 break;
12219                         }
12220                         /* fallthrough */
12221                 case 128:
12222                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12223                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12224                                         DMA_RWCTRL_WRITE_BNDRY_128);
12225                                 break;
12226                         }
12227                         /* fallthrough */
12228                 case 256:
12229                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12230                                 DMA_RWCTRL_WRITE_BNDRY_256);
12231                         break;
12232                 case 512:
12233                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12234                                 DMA_RWCTRL_WRITE_BNDRY_512);
12235                         break;
12236                 case 1024:
12237                 default:
12238                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12239                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12240                         break;
12241                 };
12242         }
12243
12244 out:
12245         return val;
12246 }
12247
/* Run one DMA transfer through the chip's internal DMA engine, used
 * to probe for host-bridge DMA incompatibilities at probe time.
 *
 * A single internal buffer descriptor referencing @buf_dma/@size is
 * written into the NIC SRAM descriptor pool through the PCI memory
 * window, then handed to the read DMA engine (@to_device != 0, host
 * memory -> NIC) or the write DMA engine (@to_device == 0, NIC ->
 * host memory) via the corresponding FTQ.  Completion is detected by
 * polling the matching completion FIFO.
 *
 * Returns 0 when the descriptor completes, -ENODEV if it does not
 * complete within ~4ms (40 polls, 100us apart).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and both DMA engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer.  The nic_mbuf value 0x00002100 is a
	 * magic SRAM address -- NOTE(review): meaning not documented
	 * here; taken as-is from Broadcom's reference driver.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid/sqid (13 << 8) | 2 -- presumably the read DMA
		 * engine's completion/send queue pair; TODO confirm.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* cqid/sqid (16 << 8) | 7 -- presumably the write DMA
		 * engine's queue pair; TODO confirm.
		 */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	/* Descriptor flags: magic value from the reference driver. */
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time,
	 * going through the PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor's SRAM
	 * address on the appropriate DMA FTQ.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12328
12329 #define TEST_BUFFER_SIZE        0x2000
12330
/* Choose and program a working DMA read/write control configuration.
 *
 * Builds an initial TG3PCI_DMA_RW_CTRL value from per-ASIC watermark
 * settings, then — on 5700/5701 only — runs a write/read DMA loop-back
 * against a 0x2000-byte coherent buffer to detect the write-DMA
 * boundary bug, tightening the write boundary to 16 bytes if corruption
 * is observed.  The final value is cached in tp->dma_rwctrl and written
 * to the chip.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or -ENODEV if the loop-back data is corrupted even with the
 * 16-byte write boundary workaround applied.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write/read command codes; watermarks added below. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: watermarks depend on ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary bits) unconditionally. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loop-back verification below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (word index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First corruption: retry once with the 16-byte
			 * write boundary workaround before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12517
12518 static void __devinit tg3_init_link_config(struct tg3 *tp)
12519 {
12520         tp->link_config.advertising =
12521                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12522                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12523                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12524                  ADVERTISED_Autoneg | ADVERTISED_MII);
12525         tp->link_config.speed = SPEED_INVALID;
12526         tp->link_config.duplex = DUPLEX_INVALID;
12527         tp->link_config.autoneg = AUTONEG_ENABLE;
12528         tp->link_config.active_speed = SPEED_INVALID;
12529         tp->link_config.active_duplex = DUPLEX_INVALID;
12530         tp->link_config.phy_is_low_power = 0;
12531         tp->link_config.orig_speed = SPEED_INVALID;
12532         tp->link_config.orig_duplex = DUPLEX_INVALID;
12533         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12534 }
12535
12536 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12537 {
12538         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12539                 tp->bufmgr_config.mbuf_read_dma_low_water =
12540                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12541                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12542                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12543                 tp->bufmgr_config.mbuf_high_water =
12544                         DEFAULT_MB_HIGH_WATER_5705;
12545                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12546                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12547                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12548                         tp->bufmgr_config.mbuf_high_water =
12549                                 DEFAULT_MB_HIGH_WATER_5906;
12550                 }
12551
12552                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12553                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12554                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12555                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12556                 tp->bufmgr_config.mbuf_high_water_jumbo =
12557                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12558         } else {
12559                 tp->bufmgr_config.mbuf_read_dma_low_water =
12560                         DEFAULT_MB_RDMA_LOW_WATER;
12561                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12562                         DEFAULT_MB_MACRX_LOW_WATER;
12563                 tp->bufmgr_config.mbuf_high_water =
12564                         DEFAULT_MB_HIGH_WATER;
12565
12566                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12567                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12568                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12569                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12570                 tp->bufmgr_config.mbuf_high_water_jumbo =
12571                         DEFAULT_MB_HIGH_WATER_JUMBO;
12572         }
12573
12574         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12575         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12576 }
12577
12578 static char * __devinit tg3_phy_string(struct tg3 *tp)
12579 {
12580         switch (tp->phy_id & PHY_ID_MASK) {
12581         case PHY_ID_BCM5400:    return "5400";
12582         case PHY_ID_BCM5401:    return "5401";
12583         case PHY_ID_BCM5411:    return "5411";
12584         case PHY_ID_BCM5701:    return "5701";
12585         case PHY_ID_BCM5703:    return "5703";
12586         case PHY_ID_BCM5704:    return "5704";
12587         case PHY_ID_BCM5705:    return "5705";
12588         case PHY_ID_BCM5750:    return "5750";
12589         case PHY_ID_BCM5752:    return "5752";
12590         case PHY_ID_BCM5714:    return "5714";
12591         case PHY_ID_BCM5780:    return "5780";
12592         case PHY_ID_BCM5755:    return "5755";
12593         case PHY_ID_BCM5787:    return "5787";
12594         case PHY_ID_BCM5784:    return "5784";
12595         case PHY_ID_BCM5756:    return "5722/5756";
12596         case PHY_ID_BCM5906:    return "5906";
12597         case PHY_ID_BCM5761:    return "5761";
12598         case PHY_ID_BCM8002:    return "8002/serdes";
12599         case 0:                 return "serdes";
12600         default:                return "unknown";
12601         };
12602 }
12603
12604 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12605 {
12606         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12607                 strcpy(str, "PCI Express");
12608                 return str;
12609         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12610                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12611
12612                 strcpy(str, "PCIX:");
12613
12614                 if ((clock_ctrl == 7) ||
12615                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12616                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12617                         strcat(str, "133MHz");
12618                 else if (clock_ctrl == 0)
12619                         strcat(str, "33MHz");
12620                 else if (clock_ctrl == 2)
12621                         strcat(str, "50MHz");
12622                 else if (clock_ctrl == 4)
12623                         strcat(str, "66MHz");
12624                 else if (clock_ctrl == 6)
12625                         strcat(str, "100MHz");
12626         } else {
12627                 strcpy(str, "PCI:");
12628                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12629                         strcat(str, "66MHz");
12630                 else
12631                         strcat(str, "33MHz");
12632         }
12633         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12634                 strcat(str, ":32-bit");
12635         else
12636                 strcat(str, ":64-bit");
12637         return str;
12638 }
12639
/* Locate the sibling PCI function of a dual-port device.
 *
 * Scans all eight functions of our slot for a device other than
 * tp->pdev.  If none is found (single-port configuration) the device's
 * own pci_dev is returned.  The returned pointer deliberately does NOT
 * carry an elevated refcount — see the comment below.
 *
 * NOTE(review): if the loop completes without breaking, the final
 * pci_get_slot() reference has already been dropped before the !peer
 * test; this relies on the last iteration yielding NULL or tp->pdev —
 * appears safe for this hardware layout, but verify if reused.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* pci_dev_put(NULL) is a no-op */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
12667
12668 static void __devinit tg3_init_coal(struct tg3 *tp)
12669 {
12670         struct ethtool_coalesce *ec = &tp->coal;
12671
12672         memset(ec, 0, sizeof(*ec));
12673         ec->cmd = ETHTOOL_GCOALESCE;
12674         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12675         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12676         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12677         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12678         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12679         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12680         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12681         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12682         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12683
12684         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12685                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12686                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12687                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12688                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12689                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12690         }
12691
12692         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12693                 ec->rx_coalesce_usecs_irq = 0;
12694                 ec->tx_coalesce_usecs_irq = 0;
12695                 ec->stats_block_coalesce_usecs = 0;
12696         }
12697 }
12698
/* PCI probe entry point: bring up one tg3 device.
 *
 * Sequence: enable/claim the PCI device, allocate the net_device, map
 * BAR 0, fetch chip invariants, choose DMA masks, configure TSO and
 * checksum features, map the APE BAR when present, quiesce any DMA left
 * running by firmware, run the DMA self-test, and finally register the
 * netdev and print the probe banner.  Errors unwind through the goto
 * ladder at the bottom in reverse acquisition order.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver version banner once, on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Wire up the private state and mode defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Install the netdev entry points. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide the TSO capability flags for this chip. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO: shrink the RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE register block (BAR 2) when the chip has an APE. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Probe banner: part number, chip rev, PHY, bus, link type, MAC. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13041
13042 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13043 {
13044         struct net_device *dev = pci_get_drvdata(pdev);
13045
13046         if (dev) {
13047                 struct tg3 *tp = netdev_priv(dev);
13048
13049                 flush_scheduled_work();
13050                 unregister_netdev(dev);
13051                 if (tp->aperegs) {
13052                         iounmap(tp->aperegs);
13053                         tp->aperegs = NULL;
13054                 }
13055                 if (tp->regs) {
13056                         iounmap(tp->regs);
13057                         tp->regs = NULL;
13058                 }
13059                 free_netdev(dev);
13060                 pci_release_regions(pdev);
13061                 pci_disable_device(pdev);
13062                 pci_set_drvdata(pdev, NULL);
13063         }
13064 }
13065
/* PM suspend hook: quiesce the device and drop it into the requested
 * low-power state.
 *
 * Stops the NAPI path and the driver timer, disables interrupts, halts
 * the chip, then asks for the power transition.  If the transition
 * fails, the device is restarted and reattached so it stays usable.
 * Returns 0 on success or the error from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the device back up. */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
13117
13118 static int tg3_resume(struct pci_dev *pdev)
13119 {
13120         struct net_device *dev = pci_get_drvdata(pdev);
13121         struct tg3 *tp = netdev_priv(dev);
13122         int err;
13123
13124         pci_restore_state(tp->pdev);
13125
13126         if (!netif_running(dev))
13127                 return 0;
13128
13129         err = tg3_set_power_state(tp, PCI_D0);
13130         if (err)
13131                 return err;
13132
13133         netif_device_attach(dev);
13134
13135         tg3_full_lock(tp, 0);
13136
13137         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13138         err = tg3_restart_hw(tp, 1);
13139         if (err)
13140                 goto out;
13141
13142         tp->timer.expires = jiffies + tp->timer_offset;
13143         add_timer(&tp->timer);
13144
13145         tg3_netif_start(tp);
13146
13147 out:
13148         tg3_full_unlock(tp);
13149
13150         return err;
13151 }
13152
/* PCI driver glue: binds tg3_init_one() / tg3_remove_one() and the
 * power-management hooks to the device IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13161
13162 static int __init tg3_init(void)
13163 {
13164         return pci_register_driver(&tg3_driver);
13165 }
13166
13167 static void __exit tg3_cleanup(void)
13168 {
13169         pci_unregister_driver(&tg3_driver);
13170 }
13171
/* Hook the driver's entry/exit points into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);