tg3: Allow WOL for phylib controlled Broadcom phys
drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.94"
73 #define DRV_MODULE_RELDATE      "August 14, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
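/* NEXT_TX depends on TG3_TX_RING_SIZE being a power of two, so the
 * "& (TG3_TX_RING_SIZE - 1)" mask is equivalent to a modulo but compiles
 * to a single AND, as the comment above suggests.  Illustrative example:
 * with a 512-entry ring, index 511 wraps to 0, since (511 + 1) & 511 == 0.
 */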
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 #define TG3_NUM_TEST            6
139
140 static char version[] __devinitdata =
141         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145 MODULE_LICENSE("GPL");
146 MODULE_VERSION(DRV_MODULE_VERSION);
147
148 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
149 module_param(tg3_debug, int, 0);
150 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152 static struct pci_device_id tg3_pci_tbl[] = {
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
214         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221         {}
222 };
223
224 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
226 static const struct {
227         const char string[ETH_GSTRING_LEN];
228 } ethtool_stats_keys[TG3_NUM_STATS] = {
229         { "rx_octets" },
230         { "rx_fragments" },
231         { "rx_ucast_packets" },
232         { "rx_mcast_packets" },
233         { "rx_bcast_packets" },
234         { "rx_fcs_errors" },
235         { "rx_align_errors" },
236         { "rx_xon_pause_rcvd" },
237         { "rx_xoff_pause_rcvd" },
238         { "rx_mac_ctrl_rcvd" },
239         { "rx_xoff_entered" },
240         { "rx_frame_too_long_errors" },
241         { "rx_jabbers" },
242         { "rx_undersize_packets" },
243         { "rx_in_length_errors" },
244         { "rx_out_length_errors" },
245         { "rx_64_or_less_octet_packets" },
246         { "rx_65_to_127_octet_packets" },
247         { "rx_128_to_255_octet_packets" },
248         { "rx_256_to_511_octet_packets" },
249         { "rx_512_to_1023_octet_packets" },
250         { "rx_1024_to_1522_octet_packets" },
251         { "rx_1523_to_2047_octet_packets" },
252         { "rx_2048_to_4095_octet_packets" },
253         { "rx_4096_to_8191_octet_packets" },
254         { "rx_8192_to_9022_octet_packets" },
255
256         { "tx_octets" },
257         { "tx_collisions" },
258
259         { "tx_xon_sent" },
260         { "tx_xoff_sent" },
261         { "tx_flow_control" },
262         { "tx_mac_errors" },
263         { "tx_single_collisions" },
264         { "tx_mult_collisions" },
265         { "tx_deferred" },
266         { "tx_excessive_collisions" },
267         { "tx_late_collisions" },
268         { "tx_collide_2times" },
269         { "tx_collide_3times" },
270         { "tx_collide_4times" },
271         { "tx_collide_5times" },
272         { "tx_collide_6times" },
273         { "tx_collide_7times" },
274         { "tx_collide_8times" },
275         { "tx_collide_9times" },
276         { "tx_collide_10times" },
277         { "tx_collide_11times" },
278         { "tx_collide_12times" },
279         { "tx_collide_13times" },
280         { "tx_collide_14times" },
281         { "tx_collide_15times" },
282         { "tx_ucast_packets" },
283         { "tx_mcast_packets" },
284         { "tx_bcast_packets" },
285         { "tx_carrier_sense_errors" },
286         { "tx_discards" },
287         { "tx_errors" },
288
289         { "dma_writeq_full" },
290         { "dma_write_prioq_full" },
291         { "rxbds_empty" },
292         { "rx_discards" },
293         { "rx_errors" },
294         { "rx_threshold_hit" },
295
296         { "dma_readq_full" },
297         { "dma_read_prioq_full" },
298         { "tx_comp_queue_full" },
299
300         { "ring_set_send_prod_index" },
301         { "ring_status_update" },
302         { "nic_irqs" },
303         { "nic_avoided_irqs" },
304         { "nic_tx_threshold_hit" }
305 };
306
307 static const struct {
308         const char string[ETH_GSTRING_LEN];
309 } ethtool_test_keys[TG3_NUM_TEST] = {
310         { "nvram test     (online) " },
311         { "link test      (online) " },
312         { "register test  (offline)" },
313         { "memory test    (offline)" },
314         { "loopback test  (offline)" },
315         { "interrupt test (offline)" },
316 };
317
318 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319 {
320         writel(val, tp->regs + off);
321 }
322
323 static u32 tg3_read32(struct tg3 *tp, u32 off)
324 {
325         return (readl(tp->regs + off));
326 }
327
328 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329 {
330         writel(val, tp->aperegs + off);
331 }
332
333 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334 {
335         return (readl(tp->aperegs + off));
336 }
337
338 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339 {
340         unsigned long flags;
341
342         spin_lock_irqsave(&tp->indirect_lock, flags);
343         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
345         spin_unlock_irqrestore(&tp->indirect_lock, flags);
346 }
347
348 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349 {
350         writel(val, tp->regs + off);
351         readl(tp->regs + off);
352 }
353
354 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
355 {
356         unsigned long flags;
357         u32 val;
358
359         spin_lock_irqsave(&tp->indirect_lock, flags);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362         spin_unlock_irqrestore(&tp->indirect_lock, flags);
363         return val;
364 }
365
366 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367 {
368         unsigned long flags;
369
370         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372                                        TG3_64BIT_REG_LOW, val);
373                 return;
374         }
375         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377                                        TG3_64BIT_REG_LOW, val);
378                 return;
379         }
380
381         spin_lock_irqsave(&tp->indirect_lock, flags);
382         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384         spin_unlock_irqrestore(&tp->indirect_lock, flags);
385
386         /* In indirect mode when disabling interrupts, we also need
387          * to clear the interrupt bit in the GRC local ctrl register.
388          */
389         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
390             (val == 0x1)) {
391                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393         }
394 }
395
396 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
397 {
398         unsigned long flags;
399         u32 val;
400
401         spin_lock_irqsave(&tp->indirect_lock, flags);
402         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404         spin_unlock_irqrestore(&tp->indirect_lock, flags);
405         return val;
406 }
407
408 /* usec_wait specifies the wait time in usec when writing to certain registers
409  * where it is unsafe to read back the register without some delay.
410  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
412  */
413 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
414 {
415         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
416             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417                 /* Non-posted methods */
418                 tp->write32(tp, off, val);
419         else {
420                 /* Posted method */
421                 tg3_write32(tp, off, val);
422                 if (usec_wait)
423                         udelay(usec_wait);
424                 tp->read32(tp, off);
425         }
426         /* Wait again after the read for the posted method to guarantee that
427          * the wait time is met.
428          */
429         if (usec_wait)
430                 udelay(usec_wait);
431 }
432
433 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
434 {
435         tp->write32_mbox(tp, off, val);
436         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
437             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438                 tp->read32_mbox(tp, off);
439 }
440
441 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
442 {
443         void __iomem *mbox = tp->regs + off;
444         writel(val, mbox);
445         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
446                 writel(val, mbox);
447         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
448                 readl(mbox);
449 }
450
451 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452 {
453         return (readl(tp->regs + off + GRCMBOX_BASE));
454 }
455
456 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457 {
458         writel(val, tp->regs + off + GRCMBOX_BASE);
459 }
460
461 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
462 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
463 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
464 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
465 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
466
467 #define tw32(reg,val)           tp->write32(tp, reg, val)
468 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
469 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
470 #define tr32(reg)               tp->read32(tp, reg)
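/* Usage sketch for the accessors above (illustrative only): tw32()/tr32()
 * go through the tp->write32()/tp->read32() hooks, tw32_f() flushes the
 * write via _tw32_flush(), and tw32_wait_f() adds a settle delay for
 * registers that are unsafe to read back immediately, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as done in tg3_switch_clocks() below.
 */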
471
472 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
473 {
474         unsigned long flags;
475
476         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
477             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
478                 return;
479
480         spin_lock_irqsave(&tp->indirect_lock, flags);
481         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
482                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
483                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
484
485                 /* Always leave this as zero. */
486                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
487         } else {
488                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
489                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
490
491                 /* Always leave this as zero. */
492                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
493         }
494         spin_unlock_irqrestore(&tp->indirect_lock, flags);
495 }
496
497 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
498 {
499         unsigned long flags;
500
501         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
502             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
503                 *val = 0;
504                 return;
505         }
506
507         spin_lock_irqsave(&tp->indirect_lock, flags);
508         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
509                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
510                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
511
512                 /* Always leave this as zero. */
513                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
514         } else {
515                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
516                 *val = tr32(TG3PCI_MEM_WIN_DATA);
517
518                 /* Always leave this as zero. */
519                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
520         }
521         spin_unlock_irqrestore(&tp->indirect_lock, flags);
522 }
523
524 static void tg3_ape_lock_init(struct tg3 *tp)
525 {
526         int i;
527
528         /* Make sure the driver isn't holding any stale locks. */
529         for (i = 0; i < 8; i++)
530                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531                                 APE_LOCK_GRANT_DRIVER);
532 }
533
534 static int tg3_ape_lock(struct tg3 *tp, int locknum)
535 {
536         int i, off;
537         int ret = 0;
538         u32 status;
539
540         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541                 return 0;
542
543         switch (locknum) {
544                 case TG3_APE_LOCK_GRC:
545                 case TG3_APE_LOCK_MEM:
546                         break;
547                 default:
548                         return -EINVAL;
549         }
550
551         off = 4 * locknum;
552
553         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555         /* Wait for up to 1 millisecond to acquire lock. */
556         for (i = 0; i < 100; i++) {
557                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558                 if (status == APE_LOCK_GRANT_DRIVER)
559                         break;
560                 udelay(10);
561         }
562
563         if (status != APE_LOCK_GRANT_DRIVER) {
564                 /* Revoke the lock request. */
565                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566                                 APE_LOCK_GRANT_DRIVER);
567
568                 ret = -EBUSY;
569         }
570
571         return ret;
572 }
573
574 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575 {
576         int off;
577
578         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579                 return;
580
581         switch (locknum) {
582                 case TG3_APE_LOCK_GRC:
583                 case TG3_APE_LOCK_MEM:
584                         break;
585                 default:
586                         return;
587         }
588
589         off = 4 * locknum;
590         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591 }
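/* Illustrative lock/unlock pairing (sketch, not taken from a specific
 * caller): code that touches APE-shared resources brackets the access:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... access the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * When TG3_FLG3_ENABLE_APE is clear, both functions return immediately.
 */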
592
593 static void tg3_disable_ints(struct tg3 *tp)
594 {
595         tw32(TG3PCI_MISC_HOST_CTRL,
596              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
597         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
598 }
599
600 static inline void tg3_cond_int(struct tg3 *tp)
601 {
602         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603             (tp->hw_status->status & SD_STATUS_UPDATED))
604                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
605         else
606                 tw32(HOSTCC_MODE, tp->coalesce_mode |
607                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
608 }
609
610 static void tg3_enable_ints(struct tg3 *tp)
611 {
612         tp->irq_sync = 0;
613         wmb();
614
615         tw32(TG3PCI_MISC_HOST_CTRL,
616              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
617         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618                        (tp->last_tag << 24));
619         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
620                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
621                                (tp->last_tag << 24));
622         tg3_cond_int(tp);
623 }
624
625 static inline unsigned int tg3_has_work(struct tg3 *tp)
626 {
627         struct tg3_hw_status *sblk = tp->hw_status;
628         unsigned int work_exists = 0;
629
630         /* check for phy events */
631         if (!(tp->tg3_flags &
632               (TG3_FLAG_USE_LINKCHG_REG |
633                TG3_FLAG_POLL_SERDES))) {
634                 if (sblk->status & SD_STATUS_LINK_CHG)
635                         work_exists = 1;
636         }
637         /* check for RX/TX work to do */
638         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640                 work_exists = 1;
641
642         return work_exists;
643 }
644
645 /* tg3_restart_ints
646  *  similar to tg3_enable_ints, but it accurately determines whether there
647  *  is new work pending and can return without flushing the PIO write
648  *  which reenables interrupts
649  */
650 static void tg3_restart_ints(struct tg3 *tp)
651 {
652         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
653                      tp->last_tag << 24);
654         mmiowb();
655
656         /* When doing tagged status, this work check is unnecessary.
657          * The last_tag we write above tells the chip which piece of
658          * work we've completed.
659          */
660         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
661             tg3_has_work(tp))
662                 tw32(HOSTCC_MODE, tp->coalesce_mode |
663                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
664 }
665
666 static inline void tg3_netif_stop(struct tg3 *tp)
667 {
668         tp->dev->trans_start = jiffies; /* prevent tx timeout */
669         napi_disable(&tp->napi);
670         netif_tx_disable(tp->dev);
671 }
672
673 static inline void tg3_netif_start(struct tg3 *tp)
674 {
675         netif_wake_queue(tp->dev);
676         /* NOTE: unconditional netif_wake_queue is only appropriate
677          * so long as all callers are assured to have free tx slots
678          * (such as after tg3_init_hw)
679          */
680         napi_enable(&tp->napi);
681         tp->hw_status->status |= SD_STATUS_UPDATED;
682         tg3_enable_ints(tp);
683 }
684
685 static void tg3_switch_clocks(struct tg3 *tp)
686 {
687         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
688         u32 orig_clock_ctrl;
689
690         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
691             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
692                 return;
693
694         orig_clock_ctrl = clock_ctrl;
695         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
696                        CLOCK_CTRL_CLKRUN_OENABLE |
697                        0x1f);
698         tp->pci_clock_ctrl = clock_ctrl;
699
700         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
701                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
702                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
703                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
704                 }
705         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
706                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
707                             clock_ctrl |
708                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
709                             40);
710                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
711                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
712                             40);
713         }
714         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
715 }
716
717 #define PHY_BUSY_LOOPS  5000
718
719 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
720 {
721         u32 frame_val;
722         unsigned int loops;
723         int ret;
724
725         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
726                 tw32_f(MAC_MI_MODE,
727                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
728                 udelay(80);
729         }
730
731         *val = 0x0;
732
733         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
734                       MI_COM_PHY_ADDR_MASK);
735         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
736                       MI_COM_REG_ADDR_MASK);
737         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
738
739         tw32_f(MAC_MI_COM, frame_val);
740
741         loops = PHY_BUSY_LOOPS;
742         while (loops != 0) {
743                 udelay(10);
744                 frame_val = tr32(MAC_MI_COM);
745
746                 if ((frame_val & MI_COM_BUSY) == 0) {
747                         udelay(5);
748                         frame_val = tr32(MAC_MI_COM);
749                         break;
750                 }
751                 loops -= 1;
752         }
753
754         ret = -EBUSY;
755         if (loops != 0) {
756                 *val = frame_val & MI_COM_DATA_MASK;
757                 ret = 0;
758         }
759
760         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
761                 tw32_f(MAC_MI_MODE, tp->mi_mode);
762                 udelay(80);
763         }
764
765         return ret;
766 }
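/* tg3_readphy()/tg3_writephy() drive the MAC's MI (MDIO) interface: the
 * MI_COM word assembled above packs the PHY address, register number and,
 * for writes, the data, and the loop then polls MI_COM_BUSY for up to
 * PHY_BUSY_LOOPS * 10 usec (about 50 ms).  Illustrative read of the basic
 * status register:
 *
 *	u32 bmsr;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is reported up by the PHY ...
 */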
767
768 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
769 {
770         u32 frame_val;
771         unsigned int loops;
772         int ret;
773
774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
775             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
776                 return 0;
777
778         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
779                 tw32_f(MAC_MI_MODE,
780                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
781                 udelay(80);
782         }
783
784         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
785                       MI_COM_PHY_ADDR_MASK);
786         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
787                       MI_COM_REG_ADDR_MASK);
788         frame_val |= (val & MI_COM_DATA_MASK);
789         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
790
791         tw32_f(MAC_MI_COM, frame_val);
792
793         loops = PHY_BUSY_LOOPS;
794         while (loops != 0) {
795                 udelay(10);
796                 frame_val = tr32(MAC_MI_COM);
797                 if ((frame_val & MI_COM_BUSY) == 0) {
798                         udelay(5);
799                         frame_val = tr32(MAC_MI_COM);
800                         break;
801                 }
802                 loops -= 1;
803         }
804
805         ret = -EBUSY;
806         if (loops != 0)
807                 ret = 0;
808
809         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
810                 tw32_f(MAC_MI_MODE, tp->mi_mode);
811                 udelay(80);
812         }
813
814         return ret;
815 }
816
817 static int tg3_bmcr_reset(struct tg3 *tp)
818 {
819         u32 phy_control;
820         int limit, err;
821
822         /* OK, reset it, and poll the BMCR_RESET bit until it
823          * clears or we time out.
824          */
825         phy_control = BMCR_RESET;
826         err = tg3_writephy(tp, MII_BMCR, phy_control);
827         if (err != 0)
828                 return -EBUSY;
829
830         limit = 5000;
831         while (limit--) {
832                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833                 if (err != 0)
834                         return -EBUSY;
835
836                 if ((phy_control & BMCR_RESET) == 0) {
837                         udelay(40);
838                         break;
839                 }
840                 udelay(10);
841         }
842         if (limit <= 0)
843                 return -EBUSY;
844
845         return 0;
846 }
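/* Worst case, the loop above issues up to 5000 BMCR reads with a 10 usec
 * delay between them, i.e. roughly 50 ms of explicit delay (plus MDIO
 * access time) before the reset is declared failed.
 */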
847
848 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849 {
850         struct tg3 *tp = (struct tg3 *)bp->priv;
851         u32 val;
852
853         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854                 return -EAGAIN;
855
856         if (tg3_readphy(tp, reg, &val))
857                 return -EIO;
858
859         return val;
860 }
861
862 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863 {
864         struct tg3 *tp = (struct tg3 *)bp->priv;
865
866         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867                 return -EAGAIN;
868
869         if (tg3_writephy(tp, reg, val))
870                 return -EIO;
871
872         return 0;
873 }
874
875 static int tg3_mdio_reset(struct mii_bus *bp)
876 {
877         return 0;
878 }
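/* tg3_mdio_read()/tg3_mdio_write()/tg3_mdio_reset() adapt the MAC's MI
 * access routines (tg3_readphy()/tg3_writephy()) to the phylib mii_bus
 * callbacks.  While the bus is paused around a chip reset
 * (TG3_FLG3_MDIOBUS_PAUSED), they refuse access with -EAGAIN.
 */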
879
880 static void tg3_mdio_config(struct tg3 *tp)
881 {
882         u32 val;
883
884         if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
885             PHY_INTERFACE_MODE_RGMII)
886                 return;
887
888         val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
889                                     MAC_PHYCFG1_RGMII_SND_STAT_EN);
890         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
891                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
892                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
893                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
894                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
895         }
896         tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
897
898         val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
899         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
900                 val |= MAC_PHYCFG2_INBAND_ENABLE;
901         tw32(MAC_PHYCFG2, val);
902
903         val = tr32(MAC_EXT_RGMII_MODE);
904         val &= ~(MAC_RGMII_MODE_RX_INT_B |
905                  MAC_RGMII_MODE_RX_QUALITY |
906                  MAC_RGMII_MODE_RX_ACTIVITY |
907                  MAC_RGMII_MODE_RX_ENG_DET |
908                  MAC_RGMII_MODE_TX_ENABLE |
909                  MAC_RGMII_MODE_TX_LOWPWR |
910                  MAC_RGMII_MODE_TX_RESET);
911         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
912                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
913                         val |= MAC_RGMII_MODE_RX_INT_B |
914                                MAC_RGMII_MODE_RX_QUALITY |
915                                MAC_RGMII_MODE_RX_ACTIVITY |
916                                MAC_RGMII_MODE_RX_ENG_DET;
917                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
918                         val |= MAC_RGMII_MODE_TX_ENABLE |
919                                MAC_RGMII_MODE_TX_LOWPWR |
920                                MAC_RGMII_MODE_TX_RESET;
921         }
922         tw32(MAC_EXT_RGMII_MODE, val);
923 }
924
925 static void tg3_mdio_start(struct tg3 *tp)
926 {
927         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
928                 mutex_lock(&tp->mdio_bus->mdio_lock);
929                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
930                 mutex_unlock(&tp->mdio_bus->mdio_lock);
931         }
932
933         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
934         tw32_f(MAC_MI_MODE, tp->mi_mode);
935         udelay(80);
936
937         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
938                 tg3_mdio_config(tp);
939 }
940
941 static void tg3_mdio_stop(struct tg3 *tp)
942 {
943         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
944                 mutex_lock(&tp->mdio_bus->mdio_lock);
945                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
946                 mutex_unlock(&tp->mdio_bus->mdio_lock);
947         }
948 }
949
950 static int tg3_mdio_init(struct tg3 *tp)
951 {
952         int i;
953         u32 reg;
954         struct phy_device *phydev;
955
956         tg3_mdio_start(tp);
957
958         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
959             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
960                 return 0;
961
962         tp->mdio_bus = mdiobus_alloc();
963         if (tp->mdio_bus == NULL)
964                 return -ENOMEM;
965
966         tp->mdio_bus->name     = "tg3 mdio bus";
967         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
968                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
969         tp->mdio_bus->priv     = tp;
970         tp->mdio_bus->parent   = &tp->pdev->dev;
971         tp->mdio_bus->read     = &tg3_mdio_read;
972         tp->mdio_bus->write    = &tg3_mdio_write;
973         tp->mdio_bus->reset    = &tg3_mdio_reset;
974         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
975         tp->mdio_bus->irq      = &tp->mdio_irq[0];
976
977         for (i = 0; i < PHY_MAX_ADDR; i++)
978                 tp->mdio_bus->irq[i] = PHY_POLL;
979
980         /* The bus registration will look for all the PHYs on the mdio bus.
981          * Unfortunately, it does not ensure the PHY is powered up before
982          * accessing the PHY ID registers.  A chip reset is the
983          * quickest way to bring the device back to an operational state.
984          */
985         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
986                 tg3_bmcr_reset(tp);
987
988         i = mdiobus_register(tp->mdio_bus);
989         if (i) {
990                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
991                         tp->dev->name, i);
992                 return i;
993         }
994
995         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
996
997         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
998
999         switch (phydev->phy_id) {
1000         case TG3_PHY_ID_BCM50610:
1001                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1002                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1003                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1004                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1005                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1006                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1007                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1008                 break;
1009         case TG3_PHY_ID_BCMAC131:
1010                 phydev->interface = PHY_INTERFACE_MODE_MII;
1011                 break;
1012         }
1013
1014         tg3_mdio_config(tp);
1015
1016         return 0;
1017 }
1018
1019 static void tg3_mdio_fini(struct tg3 *tp)
1020 {
1021         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1022                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1023                 mdiobus_unregister(tp->mdio_bus);
1024                 mdiobus_free(tp->mdio_bus);
1025                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1026         }
1027 }
1028
1029 /* tp->lock is held. */
1030 static inline void tg3_generate_fw_event(struct tg3 *tp)
1031 {
1032         u32 val;
1033
1034         val = tr32(GRC_RX_CPU_EVENT);
1035         val |= GRC_RX_CPU_DRIVER_EVENT;
1036         tw32_f(GRC_RX_CPU_EVENT, val);
1037
1038         tp->last_event_jiffies = jiffies;
1039 }
1040
1041 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1042
1043 /* tp->lock is held. */
1044 static void tg3_wait_for_event_ack(struct tg3 *tp)
1045 {
1046         int i;
1047         unsigned int delay_cnt;
1048         long time_remain;
1049
1050         /* If enough time has passed, no wait is necessary. */
1051         time_remain = (long)(tp->last_event_jiffies + 1 +
1052                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1053                       (long)jiffies;
1054         if (time_remain < 0)
1055                 return;
1056
1057         /* Check if we can shorten the wait time. */
1058         delay_cnt = jiffies_to_usecs(time_remain);
1059         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1060                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1061         delay_cnt = (delay_cnt >> 3) + 1;
1062
1063         for (i = 0; i < delay_cnt; i++) {
1064                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1065                         break;
1066                 udelay(8);
1067         }
1068 }
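/* The arithmetic above turns the remaining timeout into 8 usec polling
 * steps: delay_cnt = (usec >> 3) + 1.  With the full
 * TG3_FW_EVENT_TIMEOUT_USEC of 2500 usec that is 313 iterations of
 * udelay(8), i.e. about 2.5 ms of worst-case busy waiting.
 */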
1069
1070 /* tp->lock is held. */
1071 static void tg3_ump_link_report(struct tg3 *tp)
1072 {
1073         u32 reg;
1074         u32 val;
1075
1076         if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1077             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
1078                 return;
1079
1080         tg3_wait_for_event_ack(tp);
1081
1082         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1083
1084         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1085
1086         val = 0;
1087         if (!tg3_readphy(tp, MII_BMCR, &reg))
1088                 val = reg << 16;
1089         if (!tg3_readphy(tp, MII_BMSR, &reg))
1090                 val |= (reg & 0xffff);
1091         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1092
1093         val = 0;
1094         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1095                 val = reg << 16;
1096         if (!tg3_readphy(tp, MII_LPA, &reg))
1097                 val |= (reg & 0xffff);
1098         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1099
1100         val = 0;
1101         if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1102                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1103                         val = reg << 16;
1104                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1105                         val |= (reg & 0xffff);
1106         }
1107         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1108
1109         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1110                 val = reg << 16;
1111         else
1112                 val = 0;
1113         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1114
1115         tg3_generate_fw_event(tp);
1116 }
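/* Each NIC_SRAM_FW_CMD_DATA_MBOX word written above packs a pair of MII
 * registers as (high << 16) | low: BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, and PHYADDR in the upper half of the last word, so
 * the firmware receives a snapshot of the negotiated link state along
 * with the FWCMD_NICDRV_LINK_UPDATE command.
 */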
1117
1118 static void tg3_link_report(struct tg3 *tp)
1119 {
1120         if (!netif_carrier_ok(tp->dev)) {
1121                 if (netif_msg_link(tp))
1122                         printk(KERN_INFO PFX "%s: Link is down.\n",
1123                                tp->dev->name);
1124                 tg3_ump_link_report(tp);
1125         } else if (netif_msg_link(tp)) {
1126                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1127                        tp->dev->name,
1128                        (tp->link_config.active_speed == SPEED_1000 ?
1129                         1000 :
1130                         (tp->link_config.active_speed == SPEED_100 ?
1131                          100 : 10)),
1132                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1133                         "full" : "half"));
1134
1135                 printk(KERN_INFO PFX
1136                        "%s: Flow control is %s for TX and %s for RX.\n",
1137                        tp->dev->name,
1138                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1139                        "on" : "off",
1140                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1141                        "on" : "off");
1142                 tg3_ump_link_report(tp);
1143         }
1144 }
1145
1146 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1147 {
1148         u16 miireg;
1149
1150         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1151                 miireg = ADVERTISE_PAUSE_CAP;
1152         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1153                 miireg = ADVERTISE_PAUSE_ASYM;
1154         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1155                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1156         else
1157                 miireg = 0;
1158
1159         return miireg;
1160 }
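/* Mapping used above (and mirrored for 1000BASE-X just below), following
 * the usual pause autonegotiation encoding:
 *
 *	requested flow control		advertised bits
 *	TX and RX			ADVERTISE_PAUSE_CAP
 *	TX only				ADVERTISE_PAUSE_ASYM
 *	RX only				ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *	none				(neither)
 */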
1161
1162 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1163 {
1164         u16 miireg;
1165
1166         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1167                 miireg = ADVERTISE_1000XPAUSE;
1168         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1169                 miireg = ADVERTISE_1000XPSE_ASYM;
1170         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1171                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1172         else
1173                 miireg = 0;
1174
1175         return miireg;
1176 }
1177
1178 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1179 {
1180         u8 cap = 0;
1181
1182         if (lcladv & ADVERTISE_PAUSE_CAP) {
1183                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1184                         if (rmtadv & LPA_PAUSE_CAP)
1185                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1186                         else if (rmtadv & LPA_PAUSE_ASYM)
1187                                 cap = TG3_FLOW_CTRL_RX;
1188                 } else {
1189                         if (rmtadv & LPA_PAUSE_CAP)
1190                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1191                 }
1192         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1193                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1194                         cap = TG3_FLOW_CTRL_TX;
1195         }
1196
1197         return cap;
1198 }
1199
1200 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1201 {
1202         u8 cap = 0;
1203
1204         if (lcladv & ADVERTISE_1000XPAUSE) {
1205                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1206                         if (rmtadv & LPA_1000XPAUSE)
1207                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1208                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1209                                 cap = TG3_FLOW_CTRL_RX;
1210                 } else {
1211                         if (rmtadv & LPA_1000XPAUSE)
1212                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1213                 }
1214         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1215                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1216                         cap = TG3_FLOW_CTRL_TX;
1217         }
1218
1219         return cap;
1220 }
1221
1222 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1223 {
1224         u8 autoneg;
1225         u8 flowctrl = 0;
1226         u32 old_rx_mode = tp->rx_mode;
1227         u32 old_tx_mode = tp->tx_mode;
1228
1229         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1230                 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1231         else
1232                 autoneg = tp->link_config.autoneg;
1233
1234         if (autoneg == AUTONEG_ENABLE &&
1235             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1236                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1237                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1238                 else
1239                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1240         } else
1241                 flowctrl = tp->link_config.flowctrl;
1242
1243         tp->link_config.active_flowctrl = flowctrl;
1244
1245         if (flowctrl & TG3_FLOW_CTRL_RX)
1246                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1247         else
1248                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1249
1250         if (old_rx_mode != tp->rx_mode)
1251                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1252
1253         if (flowctrl & TG3_FLOW_CTRL_TX)
1254                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1255         else
1256                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1257
1258         if (old_tx_mode != tp->tx_mode)
1259                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1260 }
1261
1262 static void tg3_adjust_link(struct net_device *dev)
1263 {
1264         u8 oldflowctrl, linkmesg = 0;
1265         u32 mac_mode, lcl_adv, rmt_adv;
1266         struct tg3 *tp = netdev_priv(dev);
1267         struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1268
1269         spin_lock(&tp->lock);
1270
1271         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1272                                     MAC_MODE_HALF_DUPLEX);
1273
1274         oldflowctrl = tp->link_config.active_flowctrl;
1275
1276         if (phydev->link) {
1277                 lcl_adv = 0;
1278                 rmt_adv = 0;
1279
1280                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1281                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1282                 else
1283                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1284
1285                 if (phydev->duplex == DUPLEX_HALF)
1286                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1287                 else {
1288                         lcl_adv = tg3_advert_flowctrl_1000T(
1289                                   tp->link_config.flowctrl);
1290
1291                         if (phydev->pause)
1292                                 rmt_adv = LPA_PAUSE_CAP;
1293                         if (phydev->asym_pause)
1294                                 rmt_adv |= LPA_PAUSE_ASYM;
1295                 }
1296
1297                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1298         } else
1299                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1300
1301         if (mac_mode != tp->mac_mode) {
1302                 tp->mac_mode = mac_mode;
1303                 tw32_f(MAC_MODE, tp->mac_mode);
1304                 udelay(40);
1305         }
1306
1307         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1308                 tw32(MAC_TX_LENGTHS,
1309                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1310                       (6 << TX_LENGTHS_IPG_SHIFT) |
1311                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1312         else
1313                 tw32(MAC_TX_LENGTHS,
1314                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1315                       (6 << TX_LENGTHS_IPG_SHIFT) |
1316                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1317
1318         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1319             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1320             phydev->speed != tp->link_config.active_speed ||
1321             phydev->duplex != tp->link_config.active_duplex ||
1322             oldflowctrl != tp->link_config.active_flowctrl)
1323             linkmesg = 1;
1324
1325         tp->link_config.active_speed = phydev->speed;
1326         tp->link_config.active_duplex = phydev->duplex;
1327
1328         spin_unlock(&tp->lock);
1329
1330         if (linkmesg)
1331                 tg3_link_report(tp);
1332 }
1333
1334 static int tg3_phy_init(struct tg3 *tp)
1335 {
1336         struct phy_device *phydev;
1337
1338         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1339                 return 0;
1340
1341         /* Bring the PHY back to a known state. */
1342         tg3_bmcr_reset(tp);
1343
1344         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1345
1346         /* Attach the MAC to the PHY. */
1347         phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1348                              phydev->dev_flags, phydev->interface);
1349         if (IS_ERR(phydev)) {
1350                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1351                 return PTR_ERR(phydev);
1352         }
1353
1354         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1355
1356         /* Mask with MAC supported features. */
1357         phydev->supported &= (PHY_GBIT_FEATURES |
1358                               SUPPORTED_Pause |
1359                               SUPPORTED_Asym_Pause);
1360
1361         phydev->advertising = phydev->supported;
1362
1363         return 0;
1364 }
1365
1366 static void tg3_phy_start(struct tg3 *tp)
1367 {
1368         struct phy_device *phydev;
1369
1370         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1371                 return;
1372
1373         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1374
1375         if (tp->link_config.phy_is_low_power) {
1376                 tp->link_config.phy_is_low_power = 0;
1377                 phydev->speed = tp->link_config.orig_speed;
1378                 phydev->duplex = tp->link_config.orig_duplex;
1379                 phydev->autoneg = tp->link_config.orig_autoneg;
1380                 phydev->advertising = tp->link_config.orig_advertising;
1381         }
1382
1383         phy_start(phydev);
1384
1385         phy_start_aneg(phydev);
1386 }
1387
1388 static void tg3_phy_stop(struct tg3 *tp)
1389 {
1390         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1391                 return;
1392
1393         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1394 }
1395
1396 static void tg3_phy_fini(struct tg3 *tp)
1397 {
1398         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1399                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1400                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1401         }
1402 }
1403
1404 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1405 {
1406         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1407         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1408 }
1409
1410 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1411 {
1412         u32 phy;
1413
1414         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1415             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1416                 return;
1417
1418         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1419                 u32 ephy;
1420
1421                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1422                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1423                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1424                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1425                                 if (enable)
1426                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1427                                 else
1428                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1429                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1430                         }
1431                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1432                 }
1433         } else {
1434                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1435                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1436                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1437                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1438                         if (enable)
1439                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1440                         else
1441                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1442                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1443                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1444                 }
1445         }
1446 }
1447
1448 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1449 {
1450         u32 val;
1451
1452         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1453                 return;
1454
1455         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1456             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1457                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1458                              (val | (1 << 15) | (1 << 4)));
1459 }
1460
1461 static void tg3_phy_apply_otp(struct tg3 *tp)
1462 {
1463         u32 otp, phy;
1464
1465         if (!tp->phy_otp)
1466                 return;
1467
1468         otp = tp->phy_otp;
1469
1470         /* Enable SM_DSP clock and tx 6dB coding. */
1471         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1472               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1473               MII_TG3_AUXCTL_ACTL_TX_6DB;
1474         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1475
1476         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1477         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1478         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1479
1480         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1481               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1482         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1483
1484         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1485         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1486         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1487
1488         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1489         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1490
1491         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1492         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1493
1494         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1495               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1496         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1497
1498         /* Turn off SM_DSP clock. */
1499         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1500               MII_TG3_AUXCTL_ACTL_TX_6DB;
1501         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1502 }
1503
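/* Poll PHY register 0x16 until bit 12 clears (macro operation complete),
 * giving up after 100 reads.
 */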
1504 static int tg3_wait_macro_done(struct tg3 *tp)
1505 {
1506         int limit = 100;
1507
1508         while (limit--) {
1509                 u32 tmp32;
1510
1511                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1512                         if ((tmp32 & 0x1000) == 0)
1513                                 break;
1514                 }
1515         }
1516         if (limit < 0)
1517                 return -EBUSY;
1518
1519         return 0;
1520 }
1521
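/* Write a known test pattern into each of the four DSP channels and read
 * it back.  Timeouts set *resetp so the caller can reset the PHY and
 * retry; a verify mismatch returns -EBUSY.
 */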
1522 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1523 {
1524         static const u32 test_pat[4][6] = {
1525         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1526         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1527         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1528         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1529         };
1530         int chan;
1531
1532         for (chan = 0; chan < 4; chan++) {
1533                 int i;
1534
1535                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1536                              (chan * 0x2000) | 0x0200);
1537                 tg3_writephy(tp, 0x16, 0x0002);
1538
1539                 for (i = 0; i < 6; i++)
1540                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1541                                      test_pat[chan][i]);
1542
1543                 tg3_writephy(tp, 0x16, 0x0202);
1544                 if (tg3_wait_macro_done(tp)) {
1545                         *resetp = 1;
1546                         return -EBUSY;
1547                 }
1548
1549                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1550                              (chan * 0x2000) | 0x0200);
1551                 tg3_writephy(tp, 0x16, 0x0082);
1552                 if (tg3_wait_macro_done(tp)) {
1553                         *resetp = 1;
1554                         return -EBUSY;
1555                 }
1556
1557                 tg3_writephy(tp, 0x16, 0x0802);
1558                 if (tg3_wait_macro_done(tp)) {
1559                         *resetp = 1;
1560                         return -EBUSY;
1561                 }
1562
1563                 for (i = 0; i < 6; i += 2) {
1564                         u32 low, high;
1565
1566                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1567                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1568                             tg3_wait_macro_done(tp)) {
1569                                 *resetp = 1;
1570                                 return -EBUSY;
1571                         }
1572                         low &= 0x7fff;
1573                         high &= 0x000f;
1574                         if (low != test_pat[chan][i] ||
1575                             high != test_pat[chan][i+1]) {
1576                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1577                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1578                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1579
1580                                 return -EBUSY;
1581                         }
1582                 }
1583         }
1584
1585         return 0;
1586 }
1587
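/* Clear the test pattern written above by zeroing all four DSP channels. */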
1588 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1589 {
1590         int chan;
1591
1592         for (chan = 0; chan < 4; chan++) {
1593                 int i;
1594
1595                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1596                              (chan * 0x2000) | 0x0200);
1597                 tg3_writephy(tp, 0x16, 0x0002);
1598                 for (i = 0; i < 6; i++)
1599                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1600                 tg3_writephy(tp, 0x16, 0x0202);
1601                 if (tg3_wait_macro_done(tp))
1602                         return -EBUSY;
1603         }
1604
1605         return 0;
1606 }
1607
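/* PHY reset workaround for 5703/5704/5705: force 1000 Mbps full duplex
 * master mode, then write and verify the DSP channel test patterns,
 * retrying with a fresh BMCR reset up to 10 times before restoring the
 * original register settings.
 */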
1608 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1609 {
1610         u32 reg32, phy9_orig;
1611         int retries, do_phy_reset, err;
1612
1613         retries = 10;
1614         do_phy_reset = 1;
1615         do {
1616                 if (do_phy_reset) {
1617                         err = tg3_bmcr_reset(tp);
1618                         if (err)
1619                                 return err;
1620                         do_phy_reset = 0;
1621                 }
1622
1623                 /* Disable transmitter and interrupt.  */
1624                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1625                         continue;
1626
1627                 reg32 |= 0x3000;
1628                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1629
1630                 /* Set full-duplex, 1000 mbps.  */
1631                 tg3_writephy(tp, MII_BMCR,
1632                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1633
1634                 /* Set to master mode.  */
1635                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1636                         continue;
1637
1638                 tg3_writephy(tp, MII_TG3_CTRL,
1639                              (MII_TG3_CTRL_AS_MASTER |
1640                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1641
1642                 /* Enable SM_DSP_CLOCK and 6dB.  */
1643                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1644
1645                 /* Block the PHY control access.  */
1646                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1647                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1648
1649                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1650                 if (!err)
1651                         break;
1652         } while (--retries);
1653
1654         err = tg3_phy_reset_chanpat(tp);
1655         if (err)
1656                 return err;
1657
1658         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1659         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1660
1661         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1662         tg3_writephy(tp, 0x16, 0x0000);
1663
1664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1666                 /* Set Extended packet length bit for jumbo frames */
1667                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1668         } else {
1670                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1671         }
1672
1673         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1674
1675         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1676                 reg32 &= ~0x3000;
1677                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1678         } else if (!err)
1679                 err = -EBUSY;
1680
1681         return err;
1682 }
1683
1684 /* Reset the tigon3 PHY and reapply the chip-specific PHY workarounds.
1685  * Callers decide whether a reset is needed (e.g. on loss of link).
1686  */
1687 static int tg3_phy_reset(struct tg3 *tp)
1688 {
1689         u32 cpmuctrl;
1690         u32 phy_status;
1691         int err;
1692
1693         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694                 u32 val;
1695
1696                 val = tr32(GRC_MISC_CFG);
1697                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1698                 udelay(40);
1699         }
1700         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1701         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1702         if (err != 0)
1703                 return -EBUSY;
1704
1705         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1706                 netif_carrier_off(tp->dev);
1707                 tg3_link_report(tp);
1708         }
1709
1710         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1711             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1712             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1713                 err = tg3_phy_reset_5703_4_5(tp);
1714                 if (err)
1715                         return err;
1716                 goto out;
1717         }
1718
1719         cpmuctrl = 0;
1720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1721             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1722                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1723                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1724                         tw32(TG3_CPMU_CTRL,
1725                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1726         }
1727
1728         err = tg3_bmcr_reset(tp);
1729         if (err)
1730                 return err;
1731
1732         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1733                 u32 phy;
1734
1735                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1736                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1737
1738                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1739         }
1740
1741         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1742             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1743                 u32 val;
1744
1745                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1746                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1747                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1748                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1749                         udelay(40);
1750                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1751                 }
1752
1753                 /* Disable GPHY autopowerdown. */
1754                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1755                              MII_TG3_MISC_SHDW_WREN |
1756                              MII_TG3_MISC_SHDW_APD_SEL |
1757                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1758         }
1759
1760         tg3_phy_apply_otp(tp);
1761
1762 out:
1763         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1764                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1765                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1766                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1767                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1768                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1770         }
1771         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1772                 tg3_writephy(tp, 0x1c, 0x8d68);
1773                 tg3_writephy(tp, 0x1c, 0x8d68);
1774         }
1775         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1776                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1777                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1778                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1779                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1780                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1781                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1782                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1783                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1784         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1786                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1787                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1788                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1789                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1790                         tg3_writephy(tp, MII_TG3_TEST1,
1791                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1792                 } else
1793                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1794                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1795         }
1796         /* Set Extended packet length bit (bit 14) on all chips
1797          * that support jumbo frames. */
1798         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1799                 /* Cannot do read-modify-write on 5401 */
1800                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1801         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1802                 u32 phy_reg;
1803
1804                 /* Set bit 14 with read-modify-write to preserve other bits */
1805                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1806                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1807                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1808         }
1809
1810         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1811          * jumbo frames transmission.
1812          */
1813         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1814                 u32 phy_reg;
1815
1816                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1817                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1819         }
1820
1821         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1822                 /* adjust output voltage */
1823                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1824         }
1825
1826         tg3_phy_toggle_automdix(tp, 1);
1827         tg3_phy_set_wirespeed(tp);
1828         return 0;
1829 }
1830
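/* Drive the GPIOs that switch the NIC between main and auxiliary power.
 * Whether Vaux must stay up depends on the WOL/ASF state of this port
 * and, on 5704/5714, of its peer port as well.
 */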
1831 static void tg3_frob_aux_power(struct tg3 *tp)
1832 {
1833         struct tg3 *tp_peer = tp;
1834
1835         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1836                 return;
1837
1838         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1839             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1840                 struct net_device *dev_peer;
1841
1842                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1843                 /* remove_one() may have been run on the peer. */
1844                 if (!dev_peer)
1845                         tp_peer = tp;
1846                 else
1847                         tp_peer = netdev_priv(dev_peer);
1848         }
1849
1850         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1851             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1852             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1853             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1854                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1855                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1856                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1857                                     (GRC_LCLCTRL_GPIO_OE0 |
1858                                      GRC_LCLCTRL_GPIO_OE1 |
1859                                      GRC_LCLCTRL_GPIO_OE2 |
1860                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1861                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1862                                     100);
1863                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1864                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1865                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1866                                              GRC_LCLCTRL_GPIO_OE1 |
1867                                              GRC_LCLCTRL_GPIO_OE2 |
1868                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1869                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1870                                              tp->grc_local_ctrl;
1871                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1872
1873                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1874                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1875
1876                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1877                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1878                 } else {
1879                         u32 no_gpio2;
1880                         u32 grc_local_ctrl = 0;
1881
1882                         if (tp_peer != tp &&
1883                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1884                                 return;
1885
1886                         /* Workaround to prevent overdrawing Amps. */
1887                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1888                             ASIC_REV_5714) {
1889                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1890                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1891                                             grc_local_ctrl, 100);
1892                         }
1893
1894                         /* On 5753 and variants, GPIO2 cannot be used. */
1895                         no_gpio2 = tp->nic_sram_data_cfg &
1896                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1897
1898                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1899                                          GRC_LCLCTRL_GPIO_OE1 |
1900                                          GRC_LCLCTRL_GPIO_OE2 |
1901                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1902                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1903                         if (no_gpio2) {
1904                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1905                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1906                         }
1907                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1908                                                     grc_local_ctrl, 100);
1909
1910                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1911
1912                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1913                                                     grc_local_ctrl, 100);
1914
1915                         if (!no_gpio2) {
1916                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1917                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1918                                             grc_local_ctrl, 100);
1919                         }
1920                 }
1921         } else {
1922                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1923                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1924                         if (tp_peer != tp &&
1925                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1926                                 return;
1927
1928                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1929                                     (GRC_LCLCTRL_GPIO_OE1 |
1930                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1931
1932                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1933                                     GRC_LCLCTRL_GPIO_OE1, 100);
1934
1935                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936                                     (GRC_LCLCTRL_GPIO_OE1 |
1937                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1938                 }
1939         }
1940 }
1941
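/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given link
 * speed, based on the LED mode and the PHY type.
 */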
1942 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1943 {
1944         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1945                 return 1;
1946         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1947                 if (speed != SPEED_10)
1948                         return 1;
1949         } else if (speed == SPEED_10)
1950                 return 1;
1951
1952         return 0;
1953 }
1954
1955 static int tg3_setup_phy(struct tg3 *, int);
1956
1957 #define RESET_KIND_SHUTDOWN     0
1958 #define RESET_KIND_INIT         1
1959 #define RESET_KIND_SUSPEND      2
1960
1961 static void tg3_write_sig_post_reset(struct tg3 *, int);
1962 static int tg3_halt_cpu(struct tg3 *, u32);
1963 static int tg3_nvram_lock(struct tg3 *);
1964 static void tg3_nvram_unlock(struct tg3 *);
1965
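/* Put the PHY into its lowest usable power state.  SerDes and 5906 parts
 * use MAC/GRC register bits instead of BMCR_PDOWN, and a few chips skip
 * the power-down entirely to work around PHY bugs.
 */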
1966 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
1967 {
1968         u32 val;
1969
1970         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1971                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1972                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1973                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1974
1975                         sg_dig_ctrl |=
1976                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1977                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1978                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1979                 }
1980                 return;
1981         }
1982
1983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1984                 tg3_bmcr_reset(tp);
1985                 val = tr32(GRC_MISC_CFG);
1986                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1987                 udelay(40);
1988                 return;
1989         } else if (do_low_power) {
1990                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1991                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1992
1993                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1994                              MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
1995                              MII_TG3_AUXCTL_PCTL_100TX_LPWR |
1996                              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
1997                              MII_TG3_AUXCTL_PCTL_VREG_11V);
1998         }
1999
2000         /* The PHY should not be powered down on some chips because
2001          * of bugs.
2002          */
2003         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2004             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2005             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2006              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2007                 return;
2008
2009         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2010             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2011                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2012                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2013                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2014                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2015         }
2016
2017         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2018 }
2019
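/* Program the station address into the MAC address slots (plus the
 * extended slots on 5703/5704) and reseed the transmit backoff value.
 */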
2020 /* tp->lock is held. */
2021 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2022 {
2023         u32 addr_high, addr_low;
2024         int i;
2025
2026         addr_high = ((tp->dev->dev_addr[0] << 8) |
2027                      tp->dev->dev_addr[1]);
2028         addr_low = ((tp->dev->dev_addr[2] << 24) |
2029                     (tp->dev->dev_addr[3] << 16) |
2030                     (tp->dev->dev_addr[4] <<  8) |
2031                     (tp->dev->dev_addr[5] <<  0));
2032         for (i = 0; i < 4; i++) {
2033                 if (i == 1 && skip_mac_1)
2034                         continue;
2035                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2036                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2037         }
2038
2039         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2041                 for (i = 0; i < 12; i++) {
2042                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2043                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2044                 }
2045         }
2046
2047         addr_high = (tp->dev->dev_addr[0] +
2048                      tp->dev->dev_addr[1] +
2049                      tp->dev->dev_addr[2] +
2050                      tp->dev->dev_addr[3] +
2051                      tp->dev->dev_addr[4] +
2052                      tp->dev->dev_addr[5]) &
2053                 TX_BACKOFF_SEED_MASK;
2054         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2055 }
2056
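/* Transition the device to the requested PCI power state.  For D0 this
 * just re-enables Vmain; for the sleep states it saves the link
 * configuration, arms wake-on-LAN if requested, trims the MAC clocks and
 * optionally powers down the PHY before writing the new state.
 */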
2057 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2058 {
2059         u32 misc_host_ctrl;
2060         bool device_should_wake, do_low_power;
2061
2062         /* Make sure register accesses (indirect or otherwise)
2063          * will function correctly.
2064          */
2065         pci_write_config_dword(tp->pdev,
2066                                TG3PCI_MISC_HOST_CTRL,
2067                                tp->misc_host_ctrl);
2068
2069         switch (state) {
2070         case PCI_D0:
2071                 pci_enable_wake(tp->pdev, state, false);
2072                 pci_set_power_state(tp->pdev, PCI_D0);
2073
2074                 /* Switch out of Vaux if it is a NIC */
2075                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2076                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2077
2078                 return 0;
2079
2080         case PCI_D1:
2081         case PCI_D2:
2082         case PCI_D3hot:
2083                 break;
2084
2085         default:
2086                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2087                         tp->dev->name, state);
2088                 return -EINVAL;
2089         }
2090         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2091         tw32(TG3PCI_MISC_HOST_CTRL,
2092              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2093
2094         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2095                              device_may_wakeup(&tp->pdev->dev) &&
2096                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2097
2098         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2099                 do_low_power = false;
2100                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2101                     !tp->link_config.phy_is_low_power) {
2102                         struct phy_device *phydev;
2103                         u32 phyid, advertising;
2104
2105                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2106
2107                         tp->link_config.phy_is_low_power = 1;
2108
2109                         tp->link_config.orig_speed = phydev->speed;
2110                         tp->link_config.orig_duplex = phydev->duplex;
2111                         tp->link_config.orig_autoneg = phydev->autoneg;
2112                         tp->link_config.orig_advertising = phydev->advertising;
2113
2114                         advertising = ADVERTISED_TP |
2115                                       ADVERTISED_Pause |
2116                                       ADVERTISED_Autoneg |
2117                                       ADVERTISED_10baseT_Half;
2118
2119                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2120                             device_should_wake) {
2121                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2122                                         advertising |=
2123                                                 ADVERTISED_100baseT_Half |
2124                                                 ADVERTISED_100baseT_Full |
2125                                                 ADVERTISED_10baseT_Full;
2126                                 else
2127                                         advertising |= ADVERTISED_10baseT_Full;
2128                         }
2129
2130                         phydev->advertising = advertising;
2131
2132                         phy_start_aneg(phydev);
2133
2134                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2135                         if (phyid != TG3_PHY_ID_BCMAC131) {
2136                                 phyid &= TG3_PHY_OUI_MASK;
2137                                 if (phyid == TG3_PHY_OUI_1 ||
2138                                     phyid == TG3_PHY_OUI_2 ||
2139                                     phyid == TG3_PHY_OUI_3)
2140                                         do_low_power = true;
2141                         }
2142                 }
2143         } else {
2144                 do_low_power = false;
2145
2146                 if (tp->link_config.phy_is_low_power == 0) {
2147                         tp->link_config.phy_is_low_power = 1;
2148                         tp->link_config.orig_speed = tp->link_config.speed;
2149                         tp->link_config.orig_duplex = tp->link_config.duplex;
2150                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2151                 }
2152
2153                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2154                         tp->link_config.speed = SPEED_10;
2155                         tp->link_config.duplex = DUPLEX_HALF;
2156                         tp->link_config.autoneg = AUTONEG_ENABLE;
2157                         tg3_setup_phy(tp, 0);
2158                 }
2159         }
2160
2161         __tg3_set_mac_addr(tp, 0);
2162
2163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2164                 u32 val;
2165
2166                 val = tr32(GRC_VCPU_EXT_CTRL);
2167                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2168         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2169                 int i;
2170                 u32 val;
2171
2172                 for (i = 0; i < 200; i++) {
2173                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2174                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2175                                 break;
2176                         msleep(1);
2177                 }
2178         }
2179         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2180                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2181                                                      WOL_DRV_STATE_SHUTDOWN |
2182                                                      WOL_DRV_WOL |
2183                                                      WOL_SET_MAGIC_PKT);
2184
2185         if (device_should_wake) {
2186                 u32 mac_mode;
2187
2188                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2189                         if (do_low_power) {
2190                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2191                                 udelay(40);
2192                         }
2193
2194                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2195                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2196                         else
2197                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2198
2199                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2200                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2201                             ASIC_REV_5700) {
2202                                 u32 speed = (tp->tg3_flags &
2203                                              TG3_FLAG_WOL_SPEED_100MB) ?
2204                                              SPEED_100 : SPEED_10;
2205                                 if (tg3_5700_link_polarity(tp, speed))
2206                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2207                                 else
2208                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2209                         }
2210                 } else {
2211                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2212                 }
2213
2214                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2215                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2216
2217                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2218                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2219                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2220                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2221                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2222                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2223
2224                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2225                         mac_mode |= tp->mac_mode &
2226                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2227                         if (mac_mode & MAC_MODE_APE_TX_EN)
2228                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2229                 }
2230
2231                 tw32_f(MAC_MODE, mac_mode);
2232                 udelay(100);
2233
2234                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2235                 udelay(10);
2236         }
2237
2238         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2239             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2240              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2241                 u32 base_val;
2242
2243                 base_val = tp->pci_clock_ctrl;
2244                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2245                              CLOCK_CTRL_TXCLK_DISABLE);
2246
2247                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2248                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2249         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2250                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2251                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2252                 /* do nothing */
2253         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2254                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2255                 u32 newbits1, newbits2;
2256
2257                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2258                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2259                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2260                                     CLOCK_CTRL_TXCLK_DISABLE |
2261                                     CLOCK_CTRL_ALTCLK);
2262                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2263                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2264                         newbits1 = CLOCK_CTRL_625_CORE;
2265                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2266                 } else {
2267                         newbits1 = CLOCK_CTRL_ALTCLK;
2268                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2269                 }
2270
2271                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2272                             40);
2273
2274                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2275                             40);
2276
2277                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2278                         u32 newbits3;
2279
2280                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2281                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2282                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2283                                             CLOCK_CTRL_TXCLK_DISABLE |
2284                                             CLOCK_CTRL_44MHZ_CORE);
2285                         } else {
2286                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2287                         }
2288
2289                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2290                                     tp->pci_clock_ctrl | newbits3, 40);
2291                 }
2292         }
2293
2294         if (!(device_should_wake) &&
2295             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2296             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2297                 tg3_power_down_phy(tp, do_low_power);
2298
2299         tg3_frob_aux_power(tp);
2300
2301         /* Workaround for unstable PLL clock */
2302         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2303             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2304                 u32 val = tr32(0x7d00);
2305
2306                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2307                 tw32(0x7d00, val);
2308                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2309                         int err;
2310
2311                         err = tg3_nvram_lock(tp);
2312                         tg3_halt_cpu(tp, RX_CPU_BASE);
2313                         if (!err)
2314                                 tg3_nvram_unlock(tp);
2315                 }
2316         }
2317
2318         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2319
2320         if (device_should_wake)
2321                 pci_enable_wake(tp->pdev, state, true);
2322
2323         /* Finally, set the new power state. */
2324         pci_set_power_state(tp->pdev, state);
2325
2326         return 0;
2327 }
2328
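/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register.  The
 * 5906 only reports 10/100, so it uses separate status bits.
 */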
2329 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2330 {
2331         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2332         case MII_TG3_AUX_STAT_10HALF:
2333                 *speed = SPEED_10;
2334                 *duplex = DUPLEX_HALF;
2335                 break;
2336
2337         case MII_TG3_AUX_STAT_10FULL:
2338                 *speed = SPEED_10;
2339                 *duplex = DUPLEX_FULL;
2340                 break;
2341
2342         case MII_TG3_AUX_STAT_100HALF:
2343                 *speed = SPEED_100;
2344                 *duplex = DUPLEX_HALF;
2345                 break;
2346
2347         case MII_TG3_AUX_STAT_100FULL:
2348                 *speed = SPEED_100;
2349                 *duplex = DUPLEX_FULL;
2350                 break;
2351
2352         case MII_TG3_AUX_STAT_1000HALF:
2353                 *speed = SPEED_1000;
2354                 *duplex = DUPLEX_HALF;
2355                 break;
2356
2357         case MII_TG3_AUX_STAT_1000FULL:
2358                 *speed = SPEED_1000;
2359                 *duplex = DUPLEX_FULL;
2360                 break;
2361
2362         default:
2363                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2364                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2365                                  SPEED_10;
2366                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2367                                   DUPLEX_HALF;
2368                         break;
2369                 }
2370                 *speed = SPEED_INVALID;
2371                 *duplex = DUPLEX_INVALID;
2372                 break;
2373         }
2374 }
2375
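/* Program the advertisement registers (or a forced speed/duplex) from
 * tp->link_config and kick off autonegotiation where applicable.
 */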
2376 static void tg3_phy_copper_begin(struct tg3 *tp)
2377 {
2378         u32 new_adv;
2379         int i;
2380
2381         if (tp->link_config.phy_is_low_power) {
2382                 /* Entering low power mode.  Disable gigabit and
2383                  * 100baseT advertisements.
2384                  */
2385                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2386
2387                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2388                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2389                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2390                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2391
2392                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2393         } else if (tp->link_config.speed == SPEED_INVALID) {
2394                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2395                         tp->link_config.advertising &=
2396                                 ~(ADVERTISED_1000baseT_Half |
2397                                   ADVERTISED_1000baseT_Full);
2398
2399                 new_adv = ADVERTISE_CSMA;
2400                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2401                         new_adv |= ADVERTISE_10HALF;
2402                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2403                         new_adv |= ADVERTISE_10FULL;
2404                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2405                         new_adv |= ADVERTISE_100HALF;
2406                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2407                         new_adv |= ADVERTISE_100FULL;
2408
2409                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2410
2411                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2412
2413                 if (tp->link_config.advertising &
2414                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2415                         new_adv = 0;
2416                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2417                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2418                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2419                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2420                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2421                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2422                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2423                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2424                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2425                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2426                 } else {
2427                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2428                 }
2429         } else {
2430                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2431                 new_adv |= ADVERTISE_CSMA;
2432
2433                 /* Asking for a specific link mode. */
2434                 if (tp->link_config.speed == SPEED_1000) {
2435                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2436
2437                         if (tp->link_config.duplex == DUPLEX_FULL)
2438                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2439                         else
2440                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2441                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2442                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2443                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2444                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2445                 } else {
2446                         if (tp->link_config.speed == SPEED_100) {
2447                                 if (tp->link_config.duplex == DUPLEX_FULL)
2448                                         new_adv |= ADVERTISE_100FULL;
2449                                 else
2450                                         new_adv |= ADVERTISE_100HALF;
2451                         } else {
2452                                 if (tp->link_config.duplex == DUPLEX_FULL)
2453                                         new_adv |= ADVERTISE_10FULL;
2454                                 else
2455                                         new_adv |= ADVERTISE_10HALF;
2456                         }
2457                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2458
2459                         new_adv = 0;
2460                 }
2461
2462                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2463         }
2464
2465         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2466             tp->link_config.speed != SPEED_INVALID) {
2467                 u32 bmcr, orig_bmcr;
2468
2469                 tp->link_config.active_speed = tp->link_config.speed;
2470                 tp->link_config.active_duplex = tp->link_config.duplex;
2471
2472                 bmcr = 0;
2473                 switch (tp->link_config.speed) {
2474                 default:
2475                 case SPEED_10:
2476                         break;
2477
2478                 case SPEED_100:
2479                         bmcr |= BMCR_SPEED100;
2480                         break;
2481
2482                 case SPEED_1000:
2483                         bmcr |= TG3_BMCR_SPEED1000;
2484                         break;
2485                 }
2486
2487                 if (tp->link_config.duplex == DUPLEX_FULL)
2488                         bmcr |= BMCR_FULLDPLX;
2489
2490                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2491                     (bmcr != orig_bmcr)) {
2492                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2493                         for (i = 0; i < 1500; i++) {
2494                                 u32 tmp;
2495
2496                                 udelay(10);
2497                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2498                                     tg3_readphy(tp, MII_BMSR, &tmp))
2499                                         continue;
2500                                 if (!(tmp & BMSR_LSTATUS)) {
2501                                         udelay(40);
2502                                         break;
2503                                 }
2504                         }
2505                         tg3_writephy(tp, MII_BMCR, bmcr);
2506                         udelay(40);
2507                 }
2508         } else {
2509                 tg3_writephy(tp, MII_BMCR,
2510                              BMCR_ANENABLE | BMCR_ANRESTART);
2511         }
2512 }
2513
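/* BCM5401 DSP initialization sequence: disable tap power management and
 * set the extended packet length bit via a series of indirect DSP writes.
 */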
2514 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2515 {
2516         int err;
2517
2518         /* Turn off tap power management. */
2519         /* Set Extended packet length bit */
2520         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2521
2522         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2523         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2524
2525         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2526         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2527
2528         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2529         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2530
2531         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2532         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2533
2534         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2535         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2536
2537         udelay(40);
2538
2539         return err;
2540 }
2541
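/* Return 1 if the PHY advertisement registers already cover every mode
 * requested in @mask, 0 otherwise.
 */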
2542 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2543 {
2544         u32 adv_reg, all_mask = 0;
2545
2546         if (mask & ADVERTISED_10baseT_Half)
2547                 all_mask |= ADVERTISE_10HALF;
2548         if (mask & ADVERTISED_10baseT_Full)
2549                 all_mask |= ADVERTISE_10FULL;
2550         if (mask & ADVERTISED_100baseT_Half)
2551                 all_mask |= ADVERTISE_100HALF;
2552         if (mask & ADVERTISED_100baseT_Full)
2553                 all_mask |= ADVERTISE_100FULL;
2554
2555         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2556                 return 0;
2557
2558         if ((adv_reg & all_mask) != all_mask)
2559                 return 0;
2560         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2561                 u32 tg3_ctrl;
2562
2563                 all_mask = 0;
2564                 if (mask & ADVERTISED_1000baseT_Half)
2565                         all_mask |= ADVERTISE_1000HALF;
2566                 if (mask & ADVERTISED_1000baseT_Full)
2567                         all_mask |= ADVERTISE_1000FULL;
2568
2569                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2570                         return 0;
2571
2572                 if ((tg3_ctrl & all_mask) != all_mask)
2573                         return 0;
2574         }
2575         return 1;
2576 }
2577
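/* Verify that the advertised pause bits match the requested flow control
 * settings.  On a half-duplex link the advertisement is rewritten so a
 * future renegotiation starts out correct.
 */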
2578 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2579 {
2580         u32 curadv, reqadv;
2581
2582         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2583                 return 1;
2584
2585         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2586         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2587
2588         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2589                 if (curadv != reqadv)
2590                         return 0;
2591
2592                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2593                         tg3_readphy(tp, MII_LPA, rmtadv);
2594         } else {
2595                 /* Reprogram the advertisement register, even if it
2596                  * does not affect the current link.  If the link
2597                  * gets renegotiated in the future, we can save an
2598                  * additional renegotiation cycle by advertising
2599                  * it correctly in the first place.
2600                  */
2601                 if (curadv != reqadv) {
2602                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2603                                      ADVERTISE_PAUSE_ASYM);
2604                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2605                 }
2606         }
2607
2608         return 1;
2609 }
2610
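/* Bring up (or re-evaluate) the link on a copper PHY: clear pending MAC
 * and PHY events, apply per-chip PHY workarounds, poll BMSR/AUX_STAT for
 * the negotiated speed and duplex, and program MAC_MODE to match.
 */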
2611 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2612 {
2613         int current_link_up;
2614         u32 bmsr, dummy;
2615         u32 lcl_adv, rmt_adv;
2616         u16 current_speed;
2617         u8 current_duplex;
2618         int i, err;
2619
2620         tw32(MAC_EVENT, 0);
2621
2622         tw32_f(MAC_STATUS,
2623              (MAC_STATUS_SYNC_CHANGED |
2624               MAC_STATUS_CFG_CHANGED |
2625               MAC_STATUS_MI_COMPLETION |
2626               MAC_STATUS_LNKSTATE_CHANGED));
2627         udelay(40);
2628
2629         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2630                 tw32_f(MAC_MI_MODE,
2631                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2632                 udelay(80);
2633         }
2634
2635         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2636
2637         /* Some third-party PHYs need to be reset on link going
2638          * down.
2639          */
2640         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2641              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2642              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2643             netif_carrier_ok(tp->dev)) {
2644                 tg3_readphy(tp, MII_BMSR, &bmsr);
2645                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2646                     !(bmsr & BMSR_LSTATUS))
2647                         force_reset = 1;
2648         }
2649         if (force_reset)
2650                 tg3_phy_reset(tp);
2651
2652         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2653                 tg3_readphy(tp, MII_BMSR, &bmsr);
2654                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2655                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2656                         bmsr = 0;
2657
2658                 if (!(bmsr & BMSR_LSTATUS)) {
2659                         err = tg3_init_5401phy_dsp(tp);
2660                         if (err)
2661                                 return err;
2662
2663                         tg3_readphy(tp, MII_BMSR, &bmsr);
2664                         for (i = 0; i < 1000; i++) {
2665                                 udelay(10);
2666                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2667                                     (bmsr & BMSR_LSTATUS)) {
2668                                         udelay(40);
2669                                         break;
2670                                 }
2671                         }
2672
2673                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2674                             !(bmsr & BMSR_LSTATUS) &&
2675                             tp->link_config.active_speed == SPEED_1000) {
2676                                 err = tg3_phy_reset(tp);
2677                                 if (!err)
2678                                         err = tg3_init_5401phy_dsp(tp);
2679                                 if (err)
2680                                         return err;
2681                         }
2682                 }
2683         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2684                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2685                 /* 5701 {A0,B0} CRC bug workaround */
2686                 tg3_writephy(tp, 0x15, 0x0a75);
2687                 tg3_writephy(tp, 0x1c, 0x8c68);
2688                 tg3_writephy(tp, 0x1c, 0x8d68);
2689                 tg3_writephy(tp, 0x1c, 0x8c68);
2690         }
2691
2692         /* Clear pending interrupts... */
2693         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2694         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2695
2696         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2697                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2698         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2699                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2700
2701         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2702             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2703                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2704                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2705                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2706                 else
2707                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2708         }
2709
2710         current_link_up = 0;
2711         current_speed = SPEED_INVALID;
2712         current_duplex = DUPLEX_INVALID;
2713
2714         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2715                 u32 val;
2716
2717                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2718                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2719                 if (!(val & (1 << 10))) {
2720                         val |= (1 << 10);
2721                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2722                         goto relink;
2723                 }
2724         }
2725
2726         bmsr = 0;
2727         for (i = 0; i < 100; i++) {
2728                 tg3_readphy(tp, MII_BMSR, &bmsr);
2729                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2730                     (bmsr & BMSR_LSTATUS))
2731                         break;
2732                 udelay(40);
2733         }
2734
2735         if (bmsr & BMSR_LSTATUS) {
2736                 u32 aux_stat, bmcr;
2737
2738                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2739                 for (i = 0; i < 2000; i++) {
2740                         udelay(10);
2741                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2742                             aux_stat)
2743                                 break;
2744                 }
2745
2746                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2747                                              &current_speed,
2748                                              &current_duplex);
2749
2750                 bmcr = 0;
2751                 for (i = 0; i < 200; i++) {
2752                         tg3_readphy(tp, MII_BMCR, &bmcr);
2753                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2754                                 continue;
2755                         if (bmcr && bmcr != 0x7fff)
2756                                 break;
2757                         udelay(10);
2758                 }
2759
2760                 lcl_adv = 0;
2761                 rmt_adv = 0;
2762
2763                 tp->link_config.active_speed = current_speed;
2764                 tp->link_config.active_duplex = current_duplex;
2765
2766                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2767                         if ((bmcr & BMCR_ANENABLE) &&
2768                             tg3_copper_is_advertising_all(tp,
2769                                                 tp->link_config.advertising)) {
2770                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2771                                                                   &rmt_adv))
2772                                         current_link_up = 1;
2773                         }
2774                 } else {
2775                         if (!(bmcr & BMCR_ANENABLE) &&
2776                             tp->link_config.speed == current_speed &&
2777                             tp->link_config.duplex == current_duplex &&
2778                             tp->link_config.flowctrl ==
2779                             tp->link_config.active_flowctrl) {
2780                                 current_link_up = 1;
2781                         }
2782                 }
2783
2784                 if (current_link_up == 1 &&
2785                     tp->link_config.active_duplex == DUPLEX_FULL)
2786                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2787         }
2788
2789 relink:
2790         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2791                 u32 tmp;
2792
2793                 tg3_phy_copper_begin(tp);
2794
2795                 tg3_readphy(tp, MII_BMSR, &tmp);
2796                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2797                     (tmp & BMSR_LSTATUS))
2798                         current_link_up = 1;
2799         }
2800
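             /* Pick the MAC's data-path interface for the PHY: 10/100 links
              * run over the MII port mode, gigabit (and the link-down
              * default) over GMII.
              */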
2801         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2802         if (current_link_up == 1) {
2803                 if (tp->link_config.active_speed == SPEED_100 ||
2804                     tp->link_config.active_speed == SPEED_10)
2805                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2806                 else
2807                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2808         } else
2809                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2810
2811         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2812         if (tp->link_config.active_duplex == DUPLEX_HALF)
2813                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2814
2815         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2816                 if (current_link_up == 1 &&
2817                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2818                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2819                 else
2820                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2821         }
2822
2823         /* ??? Without this setting Netgear GA302T PHY does not
2824          * ??? send/receive packets...
2825          */
2826         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2827             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2828                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2829                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2830                 udelay(80);
2831         }
2832
2833         tw32_f(MAC_MODE, tp->mac_mode);
2834         udelay(40);
2835
2836         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2837                 /* Polled via timer. */
2838                 tw32_f(MAC_EVENT, 0);
2839         } else {
2840                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2841         }
2842         udelay(40);
2843
2844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2845             current_link_up == 1 &&
2846             tp->link_config.active_speed == SPEED_1000 &&
2847             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2848              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2849                 udelay(120);
2850                 tw32_f(MAC_STATUS,
2851                      (MAC_STATUS_SYNC_CHANGED |
2852                       MAC_STATUS_CFG_CHANGED));
2853                 udelay(40);
2854                 tg3_write_mem(tp,
2855                               NIC_SRAM_FIRMWARE_MBOX,
2856                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2857         }
2858
2859         if (current_link_up != netif_carrier_ok(tp->dev)) {
2860                 if (current_link_up)
2861                         netif_carrier_on(tp->dev);
2862                 else
2863                         netif_carrier_off(tp->dev);
2864                 tg3_link_report(tp);
2865         }
2866
2867         return 0;
2868 }
2869
2870 struct tg3_fiber_aneginfo {
2871         int state;
2872 #define ANEG_STATE_UNKNOWN              0
2873 #define ANEG_STATE_AN_ENABLE            1
2874 #define ANEG_STATE_RESTART_INIT         2
2875 #define ANEG_STATE_RESTART              3
2876 #define ANEG_STATE_DISABLE_LINK_OK      4
2877 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2878 #define ANEG_STATE_ABILITY_DETECT       6
2879 #define ANEG_STATE_ACK_DETECT_INIT      7
2880 #define ANEG_STATE_ACK_DETECT           8
2881 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2882 #define ANEG_STATE_COMPLETE_ACK         10
2883 #define ANEG_STATE_IDLE_DETECT_INIT     11
2884 #define ANEG_STATE_IDLE_DETECT          12
2885 #define ANEG_STATE_LINK_OK              13
2886 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2887 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2888
2889         u32 flags;
2890 #define MR_AN_ENABLE            0x00000001
2891 #define MR_RESTART_AN           0x00000002
2892 #define MR_AN_COMPLETE          0x00000004
2893 #define MR_PAGE_RX              0x00000008
2894 #define MR_NP_LOADED            0x00000010
2895 #define MR_TOGGLE_TX            0x00000020
2896 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2897 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2898 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2899 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2900 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2901 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2902 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2903 #define MR_TOGGLE_RX            0x00002000
2904 #define MR_NP_RX                0x00004000
2905
2906 #define MR_LINK_OK              0x80000000
2907
2908         unsigned long link_time, cur_time;
2909
2910         u32 ability_match_cfg;
2911         int ability_match_count;
2912
2913         char ability_match, idle_match, ack_match;
2914
2915         u32 txconfig, rxconfig;
2916 #define ANEG_CFG_NP             0x00000080
2917 #define ANEG_CFG_ACK            0x00000040
2918 #define ANEG_CFG_RF2            0x00000020
2919 #define ANEG_CFG_RF1            0x00000010
2920 #define ANEG_CFG_PS2            0x00000001
2921 #define ANEG_CFG_PS1            0x00008000
2922 #define ANEG_CFG_HD             0x00004000
2923 #define ANEG_CFG_FD             0x00002000
2924 #define ANEG_CFG_INVAL          0x00001f06
2925
2926 };
2927 #define ANEG_OK         0
2928 #define ANEG_DONE       1
2929 #define ANEG_TIMER_ENAB 2
2930 #define ANEG_FAILED     -1
2931
2932 #define ANEG_STATE_SETTLE_TIME  10000
2933
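     /* Software state machine for 1000BASE-X (clause 37 style)
      * auto-negotiation on fiber ports.  It is polled repeatedly from
      * fiber_autoneg() below and returns ANEG_DONE or ANEG_FAILED once
      * negotiation has finished; any other return value (ANEG_OK,
      * ANEG_TIMER_ENAB) means "keep polling".
      */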
2934 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2935                                    struct tg3_fiber_aneginfo *ap)
2936 {
2937         u16 flowctrl;
2938         unsigned long delta;
2939         u32 rx_cfg_reg;
2940         int ret;
2941
2942         if (ap->state == ANEG_STATE_UNKNOWN) {
2943                 ap->rxconfig = 0;
2944                 ap->link_time = 0;
2945                 ap->cur_time = 0;
2946                 ap->ability_match_cfg = 0;
2947                 ap->ability_match_count = 0;
2948                 ap->ability_match = 0;
2949                 ap->idle_match = 0;
2950                 ap->ack_match = 0;
2951         }
2952         ap->cur_time++;
2953
2954         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2955                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2956
2957                 if (rx_cfg_reg != ap->ability_match_cfg) {
2958                         ap->ability_match_cfg = rx_cfg_reg;
2959                         ap->ability_match = 0;
2960                         ap->ability_match_count = 0;
2961                 } else {
2962                         if (++ap->ability_match_count > 1) {
2963                                 ap->ability_match = 1;
2964                                 ap->ability_match_cfg = rx_cfg_reg;
2965                         }
2966                 }
2967                 if (rx_cfg_reg & ANEG_CFG_ACK)
2968                         ap->ack_match = 1;
2969                 else
2970                         ap->ack_match = 0;
2971
2972                 ap->idle_match = 0;
2973         } else {
2974                 ap->idle_match = 1;
2975                 ap->ability_match_cfg = 0;
2976                 ap->ability_match_count = 0;
2977                 ap->ability_match = 0;
2978                 ap->ack_match = 0;
2979
2980                 rx_cfg_reg = 0;
2981         }
2982
2983         ap->rxconfig = rx_cfg_reg;
2984         ret = ANEG_OK;
2985
2986         switch (ap->state) {
2987         case ANEG_STATE_UNKNOWN:
2988                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2989                         ap->state = ANEG_STATE_AN_ENABLE;
2990
2991                 /* fallthru */
2992         case ANEG_STATE_AN_ENABLE:
2993                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2994                 if (ap->flags & MR_AN_ENABLE) {
2995                         ap->link_time = 0;
2996                         ap->cur_time = 0;
2997                         ap->ability_match_cfg = 0;
2998                         ap->ability_match_count = 0;
2999                         ap->ability_match = 0;
3000                         ap->idle_match = 0;
3001                         ap->ack_match = 0;
3002
3003                         ap->state = ANEG_STATE_RESTART_INIT;
3004                 } else {
3005                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3006                 }
3007                 break;
3008
3009         case ANEG_STATE_RESTART_INIT:
3010                 ap->link_time = ap->cur_time;
3011                 ap->flags &= ~(MR_NP_LOADED);
3012                 ap->txconfig = 0;
3013                 tw32(MAC_TX_AUTO_NEG, 0);
3014                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3015                 tw32_f(MAC_MODE, tp->mac_mode);
3016                 udelay(40);
3017
3018                 ret = ANEG_TIMER_ENAB;
3019                 ap->state = ANEG_STATE_RESTART;
3020
3021                 /* fallthru */
3022         case ANEG_STATE_RESTART:
3023                 delta = ap->cur_time - ap->link_time;
3024                 if (delta > ANEG_STATE_SETTLE_TIME) {
3025                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3026                 } else {
3027                         ret = ANEG_TIMER_ENAB;
3028                 }
3029                 break;
3030
3031         case ANEG_STATE_DISABLE_LINK_OK:
3032                 ret = ANEG_DONE;
3033                 break;
3034
3035         case ANEG_STATE_ABILITY_DETECT_INIT:
3036                 ap->flags &= ~(MR_TOGGLE_TX);
3037                 ap->txconfig = ANEG_CFG_FD;
3038                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3039                 if (flowctrl & ADVERTISE_1000XPAUSE)
3040                         ap->txconfig |= ANEG_CFG_PS1;
3041                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3042                         ap->txconfig |= ANEG_CFG_PS2;
3043                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3044                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3045                 tw32_f(MAC_MODE, tp->mac_mode);
3046                 udelay(40);
3047
3048                 ap->state = ANEG_STATE_ABILITY_DETECT;
3049                 break;
3050
3051         case ANEG_STATE_ABILITY_DETECT:
3052                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3053                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3054                 }
3055                 break;
3056
3057         case ANEG_STATE_ACK_DETECT_INIT:
3058                 ap->txconfig |= ANEG_CFG_ACK;
3059                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3060                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3061                 tw32_f(MAC_MODE, tp->mac_mode);
3062                 udelay(40);
3063
3064                 ap->state = ANEG_STATE_ACK_DETECT;
3065
3066                 /* fallthru */
3067         case ANEG_STATE_ACK_DETECT:
3068                 if (ap->ack_match != 0) {
3069                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3070                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3071                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3072                         } else {
3073                                 ap->state = ANEG_STATE_AN_ENABLE;
3074                         }
3075                 } else if (ap->ability_match != 0 &&
3076                            ap->rxconfig == 0) {
3077                         ap->state = ANEG_STATE_AN_ENABLE;
3078                 }
3079                 break;
3080
3081         case ANEG_STATE_COMPLETE_ACK_INIT:
3082                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3083                         ret = ANEG_FAILED;
3084                         break;
3085                 }
3086                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3087                                MR_LP_ADV_HALF_DUPLEX |
3088                                MR_LP_ADV_SYM_PAUSE |
3089                                MR_LP_ADV_ASYM_PAUSE |
3090                                MR_LP_ADV_REMOTE_FAULT1 |
3091                                MR_LP_ADV_REMOTE_FAULT2 |
3092                                MR_LP_ADV_NEXT_PAGE |
3093                                MR_TOGGLE_RX |
3094                                MR_NP_RX);
3095                 if (ap->rxconfig & ANEG_CFG_FD)
3096                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3097                 if (ap->rxconfig & ANEG_CFG_HD)
3098                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3099                 if (ap->rxconfig & ANEG_CFG_PS1)
3100                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3101                 if (ap->rxconfig & ANEG_CFG_PS2)
3102                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3103                 if (ap->rxconfig & ANEG_CFG_RF1)
3104                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3105                 if (ap->rxconfig & ANEG_CFG_RF2)
3106                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3107                 if (ap->rxconfig & ANEG_CFG_NP)
3108                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3109
3110                 ap->link_time = ap->cur_time;
3111
3112                 ap->flags ^= (MR_TOGGLE_TX);
3113                 if (ap->rxconfig & 0x0008)
3114                         ap->flags |= MR_TOGGLE_RX;
3115                 if (ap->rxconfig & ANEG_CFG_NP)
3116                         ap->flags |= MR_NP_RX;
3117                 ap->flags |= MR_PAGE_RX;
3118
3119                 ap->state = ANEG_STATE_COMPLETE_ACK;
3120                 ret = ANEG_TIMER_ENAB;
3121                 break;
3122
3123         case ANEG_STATE_COMPLETE_ACK:
3124                 if (ap->ability_match != 0 &&
3125                     ap->rxconfig == 0) {
3126                         ap->state = ANEG_STATE_AN_ENABLE;
3127                         break;
3128                 }
3129                 delta = ap->cur_time - ap->link_time;
3130                 if (delta > ANEG_STATE_SETTLE_TIME) {
3131                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3132                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3133                         } else {
3134                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3135                                     !(ap->flags & MR_NP_RX)) {
3136                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3137                                 } else {
3138                                         ret = ANEG_FAILED;
3139                                 }
3140                         }
3141                 }
3142                 break;
3143
3144         case ANEG_STATE_IDLE_DETECT_INIT:
3145                 ap->link_time = ap->cur_time;
3146                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3147                 tw32_f(MAC_MODE, tp->mac_mode);
3148                 udelay(40);
3149
3150                 ap->state = ANEG_STATE_IDLE_DETECT;
3151                 ret = ANEG_TIMER_ENAB;
3152                 break;
3153
3154         case ANEG_STATE_IDLE_DETECT:
3155                 if (ap->ability_match != 0 &&
3156                     ap->rxconfig == 0) {
3157                         ap->state = ANEG_STATE_AN_ENABLE;
3158                         break;
3159                 }
3160                 delta = ap->cur_time - ap->link_time;
3161                 if (delta > ANEG_STATE_SETTLE_TIME) {
3162                         /* XXX another gem from the Broadcom driver :( */
3163                         ap->state = ANEG_STATE_LINK_OK;
3164                 }
3165                 break;
3166
3167         case ANEG_STATE_LINK_OK:
3168                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3169                 ret = ANEG_DONE;
3170                 break;
3171
3172         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3173                 /* ??? unimplemented */
3174                 break;
3175
3176         case ANEG_STATE_NEXT_PAGE_WAIT:
3177                 /* ??? unimplemented */
3178                 break;
3179
3180         default:
3181                 ret = ANEG_FAILED;
3182                 break;
3183         }
3184
3185         return ret;
3186 }
3187
3188 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3189 {
3190         int res = 0;
3191         struct tg3_fiber_aneginfo aninfo;
3192         int status = ANEG_FAILED;
3193         unsigned int tick;
3194         u32 tmp;
3195
3196         tw32_f(MAC_TX_AUTO_NEG, 0);
3197
3198         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3199         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3200         udelay(40);
3201
3202         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3203         udelay(40);
3204
3205         memset(&aninfo, 0, sizeof(aninfo));
3206         aninfo.flags |= MR_AN_ENABLE;
3207         aninfo.state = ANEG_STATE_UNKNOWN;
3208         aninfo.cur_time = 0;
3209         tick = 0;
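             /* Poll the software state machine roughly once per microsecond,
              * giving auto-negotiation an overall budget of about 195 ms.
              */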
3210         while (++tick < 195000) {
3211                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3212                 if (status == ANEG_DONE || status == ANEG_FAILED)
3213                         break;
3214
3215                 udelay(1);
3216         }
3217
3218         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3219         tw32_f(MAC_MODE, tp->mac_mode);
3220         udelay(40);
3221
3222         *txflags = aninfo.txconfig;
3223         *rxflags = aninfo.flags;
3224
3225         if (status == ANEG_DONE &&
3226             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3227                              MR_LP_ADV_FULL_DUPLEX)))
3228                 res = 1;
3229
3230         return res;
3231 }
3232
3233 static void tg3_init_bcm8002(struct tg3 *tp)
3234 {
3235         u32 mac_status = tr32(MAC_STATUS);
3236         int i;
3237
3238         /* Reset on first-time init or when we have a link. */
3239         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3240             !(mac_status & MAC_STATUS_PCS_SYNCED))
3241                 return;
3242
3243         /* Set PLL lock range. */
3244         tg3_writephy(tp, 0x16, 0x8007);
3245
3246         /* SW reset */
3247         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3248
3249         /* Wait for reset to complete. */
3250         /* XXX schedule_timeout() ... */
3251         for (i = 0; i < 500; i++)
3252                 udelay(10);
3253
3254         /* Config mode; select PMA/Ch 1 regs. */
3255         tg3_writephy(tp, 0x10, 0x8411);
3256
3257         /* Enable auto-lock and comdet, select txclk for tx. */
3258         tg3_writephy(tp, 0x11, 0x0a10);
3259
3260         tg3_writephy(tp, 0x18, 0x00a0);
3261         tg3_writephy(tp, 0x16, 0x41ff);
3262
3263         /* Assert and deassert POR. */
3264         tg3_writephy(tp, 0x13, 0x0400);
3265         udelay(40);
3266         tg3_writephy(tp, 0x13, 0x0000);
3267
3268         tg3_writephy(tp, 0x11, 0x0a50);
3269         udelay(40);
3270         tg3_writephy(tp, 0x11, 0x0a10);
3271
3272         /* Wait for signal to stabilize */
3273         /* XXX schedule_timeout() ... */
3274         for (i = 0; i < 15000; i++)
3275                 udelay(10);
3276
3277         /* Deselect the channel register so we can read the PHYID
3278          * later.
3279          */
3280         tg3_writephy(tp, 0x10, 0x8011);
3281 }
3282
3283 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3284 {
3285         u16 flowctrl;
3286         u32 sg_dig_ctrl, sg_dig_status;
3287         u32 serdes_cfg, expected_sg_dig_ctrl;
3288         int workaround, port_a;
3289         int current_link_up;
3290
3291         serdes_cfg = 0;
3292         expected_sg_dig_ctrl = 0;
3293         workaround = 0;
3294         port_a = 1;
3295         current_link_up = 0;
3296
3297         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3298             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3299                 workaround = 1;
3300                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3301                         port_a = 0;
3302
3303                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3304                 /* preserve bits 20-23 for voltage regulator */
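                     /* i.e. 0x00000fff | 0x00006000 | 0x00f00000 == 0x00f06fff */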
3305                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3306         }
3307
3308         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3309
3310         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3311                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3312                         if (workaround) {
3313                                 u32 val = serdes_cfg;
3314
3315                                 if (port_a)
3316                                         val |= 0xc010000;
3317                                 else
3318                                         val |= 0x4010000;
3319                                 tw32_f(MAC_SERDES_CFG, val);
3320                         }
3321
3322                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3323                 }
3324                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3325                         tg3_setup_flow_control(tp, 0, 0);
3326                         current_link_up = 1;
3327                 }
3328                 goto out;
3329         }
3330
3331         /* Want auto-negotiation.  */
3332         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3333
3334         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3335         if (flowctrl & ADVERTISE_1000XPAUSE)
3336                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3337         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3338                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3339
3340         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3341                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3342                     tp->serdes_counter &&
3343                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3344                                     MAC_STATUS_RCVD_CFG)) ==
3345                      MAC_STATUS_PCS_SYNCED)) {
3346                         tp->serdes_counter--;
3347                         current_link_up = 1;
3348                         goto out;
3349                 }
3350 restart_autoneg:
3351                 if (workaround)
3352                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3353                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3354                 udelay(5);
3355                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3356
3357                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3358                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3359         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3360                                  MAC_STATUS_SIGNAL_DET)) {
3361                 sg_dig_status = tr32(SG_DIG_STATUS);
3362                 mac_status = tr32(MAC_STATUS);
3363
3364                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3365                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3366                         u32 local_adv = 0, remote_adv = 0;
3367
3368                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3369                                 local_adv |= ADVERTISE_1000XPAUSE;
3370                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3371                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3372
3373                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3374                                 remote_adv |= LPA_1000XPAUSE;
3375                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3376                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3377
3378                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3379                         current_link_up = 1;
3380                         tp->serdes_counter = 0;
3381                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3382                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3383                         if (tp->serdes_counter)
3384                                 tp->serdes_counter--;
3385                         else {
3386                                 if (workaround) {
3387                                         u32 val = serdes_cfg;
3388
3389                                         if (port_a)
3390                                                 val |= 0xc010000;
3391                                         else
3392                                                 val |= 0x4010000;
3393
3394                                         tw32_f(MAC_SERDES_CFG, val);
3395                                 }
3396
3397                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3398                                 udelay(40);
3399
3400                                 /* Link parallel detection - link is up
3401                                  * only if we have PCS_SYNC and not
3402                                  * receiving config code words. */
3403                                 mac_status = tr32(MAC_STATUS);
3404                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3405                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3406                                         tg3_setup_flow_control(tp, 0, 0);
3407                                         current_link_up = 1;
3408                                         tp->tg3_flags2 |=
3409                                                 TG3_FLG2_PARALLEL_DETECT;
3410                                         tp->serdes_counter =
3411                                                 SERDES_PARALLEL_DET_TIMEOUT;
3412                                 } else
3413                                         goto restart_autoneg;
3414                         }
3415                 }
3416         } else {
3417                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3418                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3419         }
3420
3421 out:
3422         return current_link_up;
3423 }
3424
3425 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3426 {
3427         int current_link_up = 0;
3428
3429         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3430                 goto out;
3431
3432         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3433                 u32 txflags, rxflags;
3434                 int i;
3435
3436                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3437                         u32 local_adv = 0, remote_adv = 0;
3438
3439                         if (txflags & ANEG_CFG_PS1)
3440                                 local_adv |= ADVERTISE_1000XPAUSE;
3441                         if (txflags & ANEG_CFG_PS2)
3442                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3443
3444                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3445                                 remote_adv |= LPA_1000XPAUSE;
3446                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3447                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3448
3449                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3450
3451                         current_link_up = 1;
3452                 }
3453                 for (i = 0; i < 30; i++) {
3454                         udelay(20);
3455                         tw32_f(MAC_STATUS,
3456                                (MAC_STATUS_SYNC_CHANGED |
3457                                 MAC_STATUS_CFG_CHANGED));
3458                         udelay(40);
3459                         if ((tr32(MAC_STATUS) &
3460                              (MAC_STATUS_SYNC_CHANGED |
3461                               MAC_STATUS_CFG_CHANGED)) == 0)
3462                                 break;
3463                 }
3464
3465                 mac_status = tr32(MAC_STATUS);
3466                 if (current_link_up == 0 &&
3467                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3468                     !(mac_status & MAC_STATUS_RCVD_CFG))
3469                         current_link_up = 1;
3470         } else {
3471                 tg3_setup_flow_control(tp, 0, 0);
3472
3473                 /* Forcing 1000FD link up. */
3474                 current_link_up = 1;
3475
3476                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3477                 udelay(40);
3478
3479                 tw32_f(MAC_MODE, tp->mac_mode);
3480                 udelay(40);
3481         }
3482
3483 out:
3484         return current_link_up;
3485 }
3486
3487 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3488 {
3489         u32 orig_pause_cfg;
3490         u16 orig_active_speed;
3491         u8 orig_active_duplex;
3492         u32 mac_status;
3493         int current_link_up;
3494         int i;
3495
3496         orig_pause_cfg = tp->link_config.active_flowctrl;
3497         orig_active_speed = tp->link_config.active_speed;
3498         orig_active_duplex = tp->link_config.active_duplex;
3499
3500         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3501             netif_carrier_ok(tp->dev) &&
3502             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3503                 mac_status = tr32(MAC_STATUS);
3504                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3505                                MAC_STATUS_SIGNAL_DET |
3506                                MAC_STATUS_CFG_CHANGED |
3507                                MAC_STATUS_RCVD_CFG);
3508                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3509                                    MAC_STATUS_SIGNAL_DET)) {
3510                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3511                                             MAC_STATUS_CFG_CHANGED));
3512                         return 0;
3513                 }
3514         }
3515
3516         tw32_f(MAC_TX_AUTO_NEG, 0);
3517
3518         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3519         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3520         tw32_f(MAC_MODE, tp->mac_mode);
3521         udelay(40);
3522
3523         if (tp->phy_id == PHY_ID_BCM8002)
3524                 tg3_init_bcm8002(tp);
3525
3526         /* Enable link change event even when serdes polling.  */
3527         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3528         udelay(40);
3529
3530         current_link_up = 0;
3531         mac_status = tr32(MAC_STATUS);
3532
3533         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3534                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3535         else
3536                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3537
3538         tp->hw_status->status =
3539                 (SD_STATUS_UPDATED |
3540                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3541
3542         for (i = 0; i < 100; i++) {
3543                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3544                                     MAC_STATUS_CFG_CHANGED));
3545                 udelay(5);
3546                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3547                                          MAC_STATUS_CFG_CHANGED |
3548                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3549                         break;
3550         }
3551
3552         mac_status = tr32(MAC_STATUS);
3553         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3554                 current_link_up = 0;
3555                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3556                     tp->serdes_counter == 0) {
3557                         tw32_f(MAC_MODE, (tp->mac_mode |
3558                                           MAC_MODE_SEND_CONFIGS));
3559                         udelay(1);
3560                         tw32_f(MAC_MODE, tp->mac_mode);
3561                 }
3562         }
3563
3564         if (current_link_up == 1) {
3565                 tp->link_config.active_speed = SPEED_1000;
3566                 tp->link_config.active_duplex = DUPLEX_FULL;
3567                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3568                                     LED_CTRL_LNKLED_OVERRIDE |
3569                                     LED_CTRL_1000MBPS_ON));
3570         } else {
3571                 tp->link_config.active_speed = SPEED_INVALID;
3572                 tp->link_config.active_duplex = DUPLEX_INVALID;
3573                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3574                                     LED_CTRL_LNKLED_OVERRIDE |
3575                                     LED_CTRL_TRAFFIC_OVERRIDE));
3576         }
3577
3578         if (current_link_up != netif_carrier_ok(tp->dev)) {
3579                 if (current_link_up)
3580                         netif_carrier_on(tp->dev);
3581                 else
3582                         netif_carrier_off(tp->dev);
3583                 tg3_link_report(tp);
3584         } else {
3585                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3586                 if (orig_pause_cfg != now_pause_cfg ||
3587                     orig_active_speed != tp->link_config.active_speed ||
3588                     orig_active_duplex != tp->link_config.active_duplex)
3589                         tg3_link_report(tp);
3590         }
3591
3592         return 0;
3593 }
3594
3595 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3596 {
3597         int current_link_up, err = 0;
3598         u32 bmsr, bmcr;
3599         u16 current_speed;
3600         u8 current_duplex;
3601         u32 local_adv, remote_adv;
3602
3603         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3604         tw32_f(MAC_MODE, tp->mac_mode);
3605         udelay(40);
3606
3607         tw32(MAC_EVENT, 0);
3608
3609         tw32_f(MAC_STATUS,
3610              (MAC_STATUS_SYNC_CHANGED |
3611               MAC_STATUS_CFG_CHANGED |
3612               MAC_STATUS_MI_COMPLETION |
3613               MAC_STATUS_LNKSTATE_CHANGED));
3614         udelay(40);
3615
3616         if (force_reset)
3617                 tg3_phy_reset(tp);
3618
3619         current_link_up = 0;
3620         current_speed = SPEED_INVALID;
3621         current_duplex = DUPLEX_INVALID;
3622
3623         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3624         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3626                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3627                         bmsr |= BMSR_LSTATUS;
3628                 else
3629                         bmsr &= ~BMSR_LSTATUS;
3630         }
3631
3632         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3633
3634         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3635             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3636                 /* do nothing, just check for link up at the end */
3637         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3638                 u32 adv, new_adv;
3639
3640                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3641                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3642                                   ADVERTISE_1000XPAUSE |
3643                                   ADVERTISE_1000XPSE_ASYM |
3644                                   ADVERTISE_SLCT);
3645
3646                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3647
3648                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3649                         new_adv |= ADVERTISE_1000XHALF;
3650                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3651                         new_adv |= ADVERTISE_1000XFULL;
3652
3653                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3654                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3655                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3656                         tg3_writephy(tp, MII_BMCR, bmcr);
3657
3658                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3659                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3660                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3661
3662                         return err;
3663                 }
3664         } else {
3665                 u32 new_bmcr;
3666
3667                 bmcr &= ~BMCR_SPEED1000;
3668                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3669
3670                 if (tp->link_config.duplex == DUPLEX_FULL)
3671                         new_bmcr |= BMCR_FULLDPLX;
3672
3673                 if (new_bmcr != bmcr) {
3674                         /* BMCR_SPEED1000 is a reserved bit that needs
3675                          * to be set on write.
3676                          */
3677                         new_bmcr |= BMCR_SPEED1000;
3678
3679                         /* Force a linkdown */
3680                         if (netif_carrier_ok(tp->dev)) {
3681                                 u32 adv;
3682
3683                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3684                                 adv &= ~(ADVERTISE_1000XFULL |
3685                                          ADVERTISE_1000XHALF |
3686                                          ADVERTISE_SLCT);
3687                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3688                                 tg3_writephy(tp, MII_BMCR, bmcr |
3689                                                            BMCR_ANRESTART |
3690                                                            BMCR_ANENABLE);
3691                                 udelay(10);
3692                                 netif_carrier_off(tp->dev);
3693                         }
3694                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3695                         bmcr = new_bmcr;
3696                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3697                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3698                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3699                             ASIC_REV_5714) {
3700                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3701                                         bmsr |= BMSR_LSTATUS;
3702                                 else
3703                                         bmsr &= ~BMSR_LSTATUS;
3704                         }
3705                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3706                 }
3707         }
3708
3709         if (bmsr & BMSR_LSTATUS) {
3710                 current_speed = SPEED_1000;
3711                 current_link_up = 1;
3712                 if (bmcr & BMCR_FULLDPLX)
3713                         current_duplex = DUPLEX_FULL;
3714                 else
3715                         current_duplex = DUPLEX_HALF;
3716
3717                 local_adv = 0;
3718                 remote_adv = 0;
3719
3720                 if (bmcr & BMCR_ANENABLE) {
3721                         u32 common;
3722
3723                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3724                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3725                         common = local_adv & remote_adv;
3726                         if (common & (ADVERTISE_1000XHALF |
3727                                       ADVERTISE_1000XFULL)) {
3728                                 if (common & ADVERTISE_1000XFULL)
3729                                         current_duplex = DUPLEX_FULL;
3730                                 else
3731                                         current_duplex = DUPLEX_HALF;
3732                         }
3733                         else
3734                                 current_link_up = 0;
3735                 }
3736         }
3737
3738         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3739                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3740
3741         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3742         if (tp->link_config.active_duplex == DUPLEX_HALF)
3743                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3744
3745         tw32_f(MAC_MODE, tp->mac_mode);
3746         udelay(40);
3747
3748         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3749
3750         tp->link_config.active_speed = current_speed;
3751         tp->link_config.active_duplex = current_duplex;
3752
3753         if (current_link_up != netif_carrier_ok(tp->dev)) {
3754                 if (current_link_up)
3755                         netif_carrier_on(tp->dev);
3756                 else {
3757                         netif_carrier_off(tp->dev);
3758                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3759                 }
3760                 tg3_link_report(tp);
3761         }
3762         return err;
3763 }
3764
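     /* Expected to run periodically (e.g. from the driver timer) on
      * MII-serdes ports with autoneg enabled: if we see signal but no config
      * code words, assume the link partner is not autonegotiating and force
      * 1000/FULL via parallel detection; once config code words show up
      * again, hand the link back to autoneg.
      */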
3765 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3766 {
3767         if (tp->serdes_counter) {
3768                 /* Give autoneg time to complete. */
3769                 tp->serdes_counter--;
3770                 return;
3771         }
3772         if (!netif_carrier_ok(tp->dev) &&
3773             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3774                 u32 bmcr;
3775
3776                 tg3_readphy(tp, MII_BMCR, &bmcr);
3777                 if (bmcr & BMCR_ANENABLE) {
3778                         u32 phy1, phy2;
3779
3780                         /* Select shadow register 0x1f */
3781                         tg3_writephy(tp, 0x1c, 0x7c00);
3782                         tg3_readphy(tp, 0x1c, &phy1);
3783
3784                         /* Select expansion interrupt status register */
3785                         tg3_writephy(tp, 0x17, 0x0f01);
3786                         tg3_readphy(tp, 0x15, &phy2);
3787                         tg3_readphy(tp, 0x15, &phy2);
3788
3789                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3790                                 /* We have signal detect and are not
3791                                  * receiving config code words; the link is
3792                                  * up by parallel detection.
3793                                  */
3794
3795                                 bmcr &= ~BMCR_ANENABLE;
3796                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3797                                 tg3_writephy(tp, MII_BMCR, bmcr);
3798                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3799                         }
3800                 }
3801         }
3802         else if (netif_carrier_ok(tp->dev) &&
3803                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3804                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3805                 u32 phy2;
3806
3807                 /* Select expansion interrupt status register */
3808                 tg3_writephy(tp, 0x17, 0x0f01);
3809                 tg3_readphy(tp, 0x15, &phy2);
3810                 if (phy2 & 0x20) {
3811                         u32 bmcr;
3812
3813                         /* Config code words received, turn on autoneg. */
3814                         tg3_readphy(tp, MII_BMCR, &bmcr);
3815                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3816
3817                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3818
3819                 }
3820         }
3821 }
3822
3823 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3824 {
3825         int err;
3826
3827         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3828                 err = tg3_setup_fiber_phy(tp, force_reset);
3829         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3830                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3831         } else {
3832                 err = tg3_setup_copper_phy(tp, force_reset);
3833         }
3834
3835         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3836                 u32 val, scale;
3837
3838                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3839                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3840                         scale = 65;
3841                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3842                         scale = 6;
3843                 else
3844                         scale = 12;
3845
3846                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3847                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3848                 tw32(GRC_MISC_CFG, val);
3849         }
3850
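             /* 1000 Mb/s half duplex needs the extended slot time used for
              * carrier extension; every other speed/duplex combination gets
              * the standard slot time.
              */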
3851         if (tp->link_config.active_speed == SPEED_1000 &&
3852             tp->link_config.active_duplex == DUPLEX_HALF)
3853                 tw32(MAC_TX_LENGTHS,
3854                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3855                       (6 << TX_LENGTHS_IPG_SHIFT) |
3856                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3857         else
3858                 tw32(MAC_TX_LENGTHS,
3859                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3860                       (6 << TX_LENGTHS_IPG_SHIFT) |
3861                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3862
3863         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3864                 if (netif_carrier_ok(tp->dev)) {
3865                         tw32(HOSTCC_STAT_COAL_TICKS,
3866                              tp->coal.stats_block_coalesce_usecs);
3867                 } else {
3868                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3869                 }
3870         }
3871
3872         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3873                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3874                 if (!netif_carrier_ok(tp->dev))
3875                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3876                               tp->pwrmgmt_thresh;
3877                 else
3878                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3879                 tw32(PCIE_PWR_MGMT_THRESH, val);
3880         }
3881
3882         return err;
3883 }
3884
3885 /* This is called whenever we suspect that the system chipset is re-
3886  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3887  * is bogus tx completions. We try to recover by setting the
3888  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3889  * in the workqueue.
3890  */
3891 static void tg3_tx_recover(struct tg3 *tp)
3892 {
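             /* The write-reorder workaround must not already be in effect;
              * if it were, these bogus completions could not be blamed on
              * MMIO reordering and something else is broken.
              */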
3893         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3894                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3895
3896         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3897                "mapped I/O cycles to the network device, attempting to "
3898                "recover. Please report the problem to the driver maintainer "
3899                "and include system chipset information.\n", tp->dev->name);
3900
3901         spin_lock(&tp->lock);
3902         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3903         spin_unlock(&tp->lock);
3904 }
3905
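     /* tg3_tx_avail() returns the number of free TX descriptors.
      * Worked example (assuming TG3_TX_RING_SIZE is 512): with
      * tx_pending = 511, tx_prod = 10 and tx_cons = 500,
      * (10 - 500) & 511 = 22 descriptors are still in flight, leaving
      * 511 - 22 = 489 available.
      */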
3906 static inline u32 tg3_tx_avail(struct tg3 *tp)
3907 {
3908         smp_mb();
3909         return (tp->tx_pending -
3910                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3911 }
3912
3913 /* Tigon3 never reports partial packet sends.  So we do not
3914  * need special logic to handle SKBs that have not had all
3915  * of their frags sent yet, like SunGEM does.
3916  */
3917 static void tg3_tx(struct tg3 *tp)
3918 {
3919         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3920         u32 sw_idx = tp->tx_cons;
3921
3922         while (sw_idx != hw_idx) {
3923                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3924                 struct sk_buff *skb = ri->skb;
3925                 int i, tx_bug = 0;
3926
3927                 if (unlikely(skb == NULL)) {
3928                         tg3_tx_recover(tp);
3929                         return;
3930                 }
3931
3932                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3933
3934                 ri->skb = NULL;
3935
3936                 sw_idx = NEXT_TX(sw_idx);
3937
3938                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3939                         ri = &tp->tx_buffers[sw_idx];
3940                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3941                                 tx_bug = 1;
3942                         sw_idx = NEXT_TX(sw_idx);
3943                 }
3944
3945                 dev_kfree_skb(skb);
3946
3947                 if (unlikely(tx_bug)) {
3948                         tg3_tx_recover(tp);
3949                         return;
3950                 }
3951         }
3952
3953         tp->tx_cons = sw_idx;
3954
3955         /* Need to make the tx_cons update visible to tg3_start_xmit()
3956          * before checking for netif_queue_stopped().  Without the
3957          * memory barrier, there is a small possibility that tg3_start_xmit()
3958          * will miss it and cause the queue to be stopped forever.
3959          */
3960         smp_mb();
3961
3962         if (unlikely(netif_queue_stopped(tp->dev) &&
3963                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3964                 netif_tx_lock(tp->dev);
3965                 if (netif_queue_stopped(tp->dev) &&
3966                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3967                         netif_wake_queue(tp->dev);
3968                 netif_tx_unlock(tp->dev);
3969         }
3970 }
3971
3972 /* Returns size of skb allocated or < 0 on error.
3973  *
3974  * We only need to fill in the address because the other members
3975  * of the RX descriptor are invariant, see tg3_init_rings.
3976  *
3977  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3978  * posting buffers we only dirty the first cache line of the RX
3979  * descriptor (containing the address).  Whereas for the RX status
3980  * buffers the cpu only reads the last cacheline of the RX descriptor
3981  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3982  */
3983 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3984                             int src_idx, u32 dest_idx_unmasked)
3985 {
3986         struct tg3_rx_buffer_desc *desc;
3987         struct ring_info *map, *src_map;
3988         struct sk_buff *skb;
3989         dma_addr_t mapping;
3990         int skb_size, dest_idx;
3991
3992         src_map = NULL;
3993         switch (opaque_key) {
3994         case RXD_OPAQUE_RING_STD:
3995                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3996                 desc = &tp->rx_std[dest_idx];
3997                 map = &tp->rx_std_buffers[dest_idx];
3998                 if (src_idx >= 0)
3999                         src_map = &tp->rx_std_buffers[src_idx];
4000                 skb_size = tp->rx_pkt_buf_sz;
4001                 break;
4002
4003         case RXD_OPAQUE_RING_JUMBO:
4004                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4005                 desc = &tp->rx_jumbo[dest_idx];
4006                 map = &tp->rx_jumbo_buffers[dest_idx];
4007                 if (src_idx >= 0)
4008                         src_map = &tp->rx_jumbo_buffers[src_idx];
4009                 skb_size = RX_JUMBO_PKT_BUF_SZ;
4010                 break;
4011
4012         default:
4013                 return -EINVAL;
4014         }
4015
4016         /* Do not overwrite any of the map or rp information
4017          * until we are sure we can commit to a new buffer.
4018          *
4019          * Callers depend upon this behavior and assume that
4020          * we leave everything unchanged if we fail.
4021          */
4022         skb = netdev_alloc_skb(tp->dev, skb_size);
4023         if (skb == NULL)
4024                 return -ENOMEM;
4025
4026         skb_reserve(skb, tp->rx_offset);
4027
4028         mapping = pci_map_single(tp->pdev, skb->data,
4029                                  skb_size - tp->rx_offset,
4030                                  PCI_DMA_FROMDEVICE);
4031
4032         map->skb = skb;
4033         pci_unmap_addr_set(map, mapping, mapping);
4034
4035         if (src_map != NULL)
4036                 src_map->skb = NULL;
4037
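             /* The chip consumes the 64-bit DMA address as separate high
              * and low 32-bit words in the descriptor.
              */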
4038         desc->addr_hi = ((u64)mapping >> 32);
4039         desc->addr_lo = ((u64)mapping & 0xffffffff);
4040
4041         return skb_size;
4042 }
4043
4044 /* We only need to move over the address because the other
4045  * members of the RX descriptor are invariant.  See notes above
4046  * tg3_alloc_rx_skb for full details.
4047  */
4048 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4049                            int src_idx, u32 dest_idx_unmasked)
4050 {
4051         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4052         struct ring_info *src_map, *dest_map;
4053         int dest_idx;
4054
4055         switch (opaque_key) {
4056         case RXD_OPAQUE_RING_STD:
4057                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4058                 dest_desc = &tp->rx_std[dest_idx];
4059                 dest_map = &tp->rx_std_buffers[dest_idx];
4060                 src_desc = &tp->rx_std[src_idx];
4061                 src_map = &tp->rx_std_buffers[src_idx];
4062                 break;
4063
4064         case RXD_OPAQUE_RING_JUMBO:
4065                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4066                 dest_desc = &tp->rx_jumbo[dest_idx];
4067                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4068                 src_desc = &tp->rx_jumbo[src_idx];
4069                 src_map = &tp->rx_jumbo_buffers[src_idx];
4070                 break;
4071
4072         default:
4073                 return;
4074         }
4075
4076         dest_map->skb = src_map->skb;
4077         pci_unmap_addr_set(dest_map, mapping,
4078                            pci_unmap_addr(src_map, mapping));
4079         dest_desc->addr_hi = src_desc->addr_hi;
4080         dest_desc->addr_lo = src_desc->addr_lo;
4081
4082         src_map->skb = NULL;
4083 }
4084
4085 #if TG3_VLAN_TAG_USED
4086 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4087 {
4088         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4089 }
4090 #endif
4091
4092 /* The RX ring scheme is composed of multiple rings which post fresh
4093  * buffers to the chip, and one special ring the chip uses to report
4094  * status back to the host.
4095  *
4096  * The special ring reports the status of received packets to the
4097  * host.  The chip does not write into the original descriptor the
4098  * RX buffer was obtained from.  The chip simply takes the original
4099  * descriptor as provided by the host, updates the status and length
4100  * field, then writes this into the next status ring entry.
4101  *
4102  * Each ring the host uses to post buffers to the chip is described
4103  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4104  * it is first placed into the on-chip RAM.  Once the packet's length
4105  * is known, the chip walks down the TG3_BDINFO entries to select the
4106  * ring: each TG3_BDINFO specifies a MAXLEN field, and the first entry
4107  * whose MAXLEN covers the new packet's length is chosen.
4108  *
4109  * The "separate ring for rx status" scheme may sound queer, but it makes
4110  * sense from a cache coherency perspective.  If only the host writes
4111  * to the buffer post rings, and only the chip writes to the rx status
4112  * rings, then cache lines never move beyond shared-modified state.
4113  * If both the host and chip were to write into the same ring, cache line
4114  * eviction could occur since both entities want it in an exclusive state.
4115  */
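/* As an illustration (default buffer sizes assumed): a 1500-byte frame fits
 * within the standard ring's MAXLEN, while a jumbo frame overflows it and
 * falls through to the jumbo ring's BDINFO when that ring is enabled.
 */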
4116 static int tg3_rx(struct tg3 *tp, int budget)
4117 {
4118         u32 work_mask, rx_std_posted = 0;
4119         u32 sw_idx = tp->rx_rcb_ptr;
4120         u16 hw_idx;
4121         int received;
4122
4123         hw_idx = tp->hw_status->idx[0].rx_producer;
4124         /*
4125          * We need to order the read of hw_idx and the read of
4126          * the opaque cookie.
4127          */
4128         rmb();
4129         work_mask = 0;
4130         received = 0;
4131         while (sw_idx != hw_idx && budget > 0) {
4132                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4133                 unsigned int len;
4134                 struct sk_buff *skb;
4135                 dma_addr_t dma_addr;
4136                 u32 opaque_key, desc_idx, *post_ptr;
4137
4138                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4139                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4140                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4141                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4142                                                   mapping);
4143                         skb = tp->rx_std_buffers[desc_idx].skb;
4144                         post_ptr = &tp->rx_std_ptr;
4145                         rx_std_posted++;
4146                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4147                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4148                                                   mapping);
4149                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4150                         post_ptr = &tp->rx_jumbo_ptr;
4151                 } else {
4153                         goto next_pkt_nopost;
4154                 }
4155
4156                 work_mask |= opaque_key;
4157
4158                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4159                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4160                 drop_it:
4161                         tg3_recycle_rx(tp, opaque_key,
4162                                        desc_idx, *post_ptr);
4163                 drop_it_no_recycle:
4164                         /* The other statistics are tracked by the card itself. */
4165                         tp->net_stats.rx_dropped++;
4166                         goto next_pkt;
4167                 }
4168
4169                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4170
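                /* Two receive strategies: for frames above the copy threshold,
                 * allocate a fresh replacement buffer and hand the original skb
                 * up the stack; for small frames, copy the data into a new
                 * small skb and recycle the original ring buffer.
                 */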
4171                 if (len > RX_COPY_THRESHOLD
4172                         && tp->rx_offset == 2
4173                         /* rx_offset != 2 iff this is a 5701 card running
4174                          * in PCI-X mode [see tg3_get_invariants()] */
4175                 ) {
4176                         int skb_size;
4177
4178                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4179                                                     desc_idx, *post_ptr);
4180                         if (skb_size < 0)
4181                                 goto drop_it;
4182
4183                         pci_unmap_single(tp->pdev, dma_addr,
4184                                          skb_size - tp->rx_offset,
4185                                          PCI_DMA_FROMDEVICE);
4186
4187                         skb_put(skb, len);
4188                 } else {
4189                         struct sk_buff *copy_skb;
4190
4191                         tg3_recycle_rx(tp, opaque_key,
4192                                        desc_idx, *post_ptr);
4193
4194                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4195                         if (copy_skb == NULL)
4196                                 goto drop_it_no_recycle;
4197
4198                         skb_reserve(copy_skb, 2);
4199                         skb_put(copy_skb, len);
4200                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4201                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4202                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4203
4204                         /* We'll reuse the original ring buffer. */
4205                         skb = copy_skb;
4206                 }
4207
4208                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4209                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4210                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4211                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4212                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4213                 else
4214                         skb->ip_summed = CHECKSUM_NONE;
4215
4216                 skb->protocol = eth_type_trans(skb, tp->dev);
4217 #if TG3_VLAN_TAG_USED
4218                 if (tp->vlgrp != NULL &&
4219                     desc->type_flags & RXD_FLAG_VLAN) {
4220                         tg3_vlan_rx(tp, skb,
4221                                     desc->err_vlan & RXD_VLAN_MASK);
4222                 } else
4223 #endif
4224                         netif_receive_skb(skb);
4225
4226                 tp->dev->last_rx = jiffies;
4227                 received++;
4228                 budget--;
4229
4230 next_pkt:
4231                 (*post_ptr)++;
4232
4233                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4234                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4235
4236                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4237                                      TG3_64BIT_REG_LOW, idx);
4238                         work_mask &= ~RXD_OPAQUE_RING_STD;
4239                         rx_std_posted = 0;
4240                 }
4241 next_pkt_nopost:
4242                 sw_idx++;
4243                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4244
4245                 /* Refresh hw_idx to see if there is new work */
4246                 if (sw_idx == hw_idx) {
4247                         hw_idx = tp->hw_status->idx[0].rx_producer;
4248                         rmb();
4249                 }
4250         }
4251
4252         /* ACK the status ring. */
4253         tp->rx_rcb_ptr = sw_idx;
4254         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4255
4256         /* Refill RX ring(s). */
4257         if (work_mask & RXD_OPAQUE_RING_STD) {
4258                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4259                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4260                              sw_idx);
4261         }
4262         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4263                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4264                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4265                              sw_idx);
4266         }
4267         mmiowb();
4268
4269         return received;
4270 }
4271
4272 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4273 {
4274         struct tg3_hw_status *sblk = tp->hw_status;
4275
4276         /* handle link change and other phy events */
4277         if (!(tp->tg3_flags &
4278               (TG3_FLAG_USE_LINKCHG_REG |
4279                TG3_FLAG_POLL_SERDES))) {
4280                 if (sblk->status & SD_STATUS_LINK_CHG) {
4281                         sblk->status = SD_STATUS_UPDATED |
4282                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4283                         spin_lock(&tp->lock);
4284                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4285                                 tw32_f(MAC_STATUS,
4286                                      (MAC_STATUS_SYNC_CHANGED |
4287                                       MAC_STATUS_CFG_CHANGED |
4288                                       MAC_STATUS_MI_COMPLETION |
4289                                       MAC_STATUS_LNKSTATE_CHANGED));
4290                                 udelay(40);
4291                         } else
4292                                 tg3_setup_phy(tp, 0);
4293                         spin_unlock(&tp->lock);
4294                 }
4295         }
4296
4297         /* run TX completion thread */
4298         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4299                 tg3_tx(tp);
4300                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4301                         return work_done;
4302         }
4303
4304         /* run RX thread, within the bounds set by NAPI.
4305          * All RX "locking" is done by ensuring outside
4306          * code synchronizes with tg3->napi.poll()
4307          */
4308         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4309                 work_done += tg3_rx(tp, budget - work_done);
4310
4311         return work_done;
4312 }
4313
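/* NAPI poll callback: process TX completions and RX packets until either the
 * budget is exhausted or no work remains, then complete NAPI and let
 * tg3_restart_ints() re-enable chip interrupts.  With tagged status blocks,
 * tp->last_tag records how far we have processed.
 */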
4314 static int tg3_poll(struct napi_struct *napi, int budget)
4315 {
4316         struct tg3 *tp = container_of(napi, struct tg3, napi);
4317         int work_done = 0;
4318         struct tg3_hw_status *sblk = tp->hw_status;
4319
4320         while (1) {
4321                 work_done = tg3_poll_work(tp, work_done, budget);
4322
4323                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4324                         goto tx_recovery;
4325
4326                 if (unlikely(work_done >= budget))
4327                         break;
4328
4329                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4330                         /* tp->last_tag is used in tg3_restart_ints() below
4331                          * to tell the hw how much work has been processed,
4332                          * so we must read it before checking for more work.
4333                          */
4334                         tp->last_tag = sblk->status_tag;
4335                         rmb();
4336                 } else
4337                         sblk->status &= ~SD_STATUS_UPDATED;
4338
4339                 if (likely(!tg3_has_work(tp))) {
4340                         netif_rx_complete(tp->dev, napi);
4341                         tg3_restart_ints(tp);
4342                         break;
4343                 }
4344         }
4345
4346         return work_done;
4347
4348 tx_recovery:
4349         /* work_done is guaranteed to be less than budget. */
4350         netif_rx_complete(tp->dev, napi);
4351         schedule_work(&tp->reset_task);
4352         return work_done;
4353 }
4354
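/* Flag the interrupt path as quiesced and wait for any handler already
 * running on another CPU to finish.  The ISRs check tg3_irq_sync() and bail
 * out early once irq_sync is set.
 */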
4355 static void tg3_irq_quiesce(struct tg3 *tp)
4356 {
4357         BUG_ON(tp->irq_sync);
4358
4359         tp->irq_sync = 1;
4360         smp_mb();
4361
4362         synchronize_irq(tp->pdev->irq);
4363 }
4364
4365 static inline int tg3_irq_sync(struct tg3 *tp)
4366 {
4367         return tp->irq_sync;
4368 }
4369
4370 /* Fully shut down all tg3 driver activity elsewhere in the system.
4371  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4372  * with as well.  Most of the time, this is not necessary except when
4373  * shutting down the device.
4374  */
4375 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4376 {
4377         spin_lock_bh(&tp->lock);
4378         if (irq_sync)
4379                 tg3_irq_quiesce(tp);
4380 }
4381
4382 static inline void tg3_full_unlock(struct tg3 *tp)
4383 {
4384         spin_unlock_bh(&tp->lock);
4385 }
4386
4387 /* One-shot MSI handler - Chip automatically disables interrupt
4388  * after sending MSI so driver doesn't have to do it.
4389  */
4390 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4391 {
4392         struct net_device *dev = dev_id;
4393         struct tg3 *tp = netdev_priv(dev);
4394
4395         prefetch(tp->hw_status);
4396         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4397
4398         if (likely(!tg3_irq_sync(tp)))
4399                 netif_rx_schedule(dev, &tp->napi);
4400
4401         return IRQ_HANDLED;
4402 }
4403
4404 /* MSI ISR - No need to check for interrupt sharing and no need to
4405  * flush status block and interrupt mailbox. PCI ordering rules
4406  * guarantee that MSI will arrive after the status block.
4407  */
4408 static irqreturn_t tg3_msi(int irq, void *dev_id)
4409 {
4410         struct net_device *dev = dev_id;
4411         struct tg3 *tp = netdev_priv(dev);
4412
4413         prefetch(tp->hw_status);
4414         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4415         /*
4416          * Writing any value to intr-mbox-0 clears PCI INTA# and
4417          * chip-internal interrupt pending events.
4418          * Writing non-zero to intr-mbox-0 additionally tells the
4419          * NIC to stop sending us irqs, engaging "in-intr-handler"
4420          * event coalescing.
4421          */
4422         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4423         if (likely(!tg3_irq_sync(tp)))
4424                 netif_rx_schedule(dev, &tp->napi);
4425
4426         return IRQ_RETVAL(1);
4427 }
4428
4429 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4430 {
4431         struct net_device *dev = dev_id;
4432         struct tg3 *tp = netdev_priv(dev);
4433         struct tg3_hw_status *sblk = tp->hw_status;
4434         unsigned int handled = 1;
4435
4436         /* In INTx mode, it is possible for the interrupt to arrive at the
4437          * CPU before the status block posted just prior to the interrupt
4438          * has reached host memory.  Reading the PCI State register confirms
4439          * whether the interrupt is ours and flushes the status block.
4440          */
4441         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4442                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4443                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4444                         handled = 0;
4445                         goto out;
4446                 }
4447         }
4448
4449         /*
4450          * Writing any value to intr-mbox-0 clears PCI INTA# and
4451          * chip-internal interrupt pending events.
4452          * Writing non-zero to intr-mbox-0 additionally tells the
4453          * NIC to stop sending us irqs, engaging "in-intr-handler"
4454          * event coalescing.
4455          *
4456          * Flush the mailbox to de-assert the IRQ immediately to prevent
4457          * spurious interrupts.  The flush impacts performance but
4458          * excessive spurious interrupts can be worse in some cases.
4459          */
4460         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4461         if (tg3_irq_sync(tp))
4462                 goto out;
4463         sblk->status &= ~SD_STATUS_UPDATED;
4464         if (likely(tg3_has_work(tp))) {
4465                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4466                 netif_rx_schedule(dev, &tp->napi);
4467         } else {
4468                 /* No work, shared interrupt perhaps?  Re-enable
4469                  * interrupts, and flush that PCI write.
4470                  */
4471                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4472                                0x00000000);
4473         }
4474 out:
4475         return IRQ_RETVAL(handled);
4476 }
4477
4478 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4479 {
4480         struct net_device *dev = dev_id;
4481         struct tg3 *tp = netdev_priv(dev);
4482         struct tg3_hw_status *sblk = tp->hw_status;
4483         unsigned int handled = 1;
4484
4485         /* In INTx mode, it is possible for the interrupt to arrive at the
4486          * CPU before the status block posted just prior to the interrupt
4487          * has reached host memory.  Reading the PCI State register confirms
4488          * whether the interrupt is ours and flushes the status block.
4489          */
4490         if (unlikely(sblk->status_tag == tp->last_tag)) {
4491                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4492                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4493                         handled = 0;
4494                         goto out;
4495                 }
4496         }
4497
4498         /*
4499          * Writing any value to intr-mbox-0 clears PCI INTA# and
4500          * chip-internal interrupt pending events.
4501          * Writing non-zero to intr-mbox-0 additionally tells the
4502          * NIC to stop sending us irqs, engaging "in-intr-handler"
4503          * event coalescing.
4504          *
4505          * Flush the mailbox to de-assert the IRQ immediately to prevent
4506          * spurious interrupts.  The flush impacts performance but
4507          * excessive spurious interrupts can be worse in some cases.
4508          */
4509         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4510         if (tg3_irq_sync(tp))
4511                 goto out;
4512         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4513                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4514                 /* Update last_tag to mark that this status has been
4515                  * seen. Because interrupt may be shared, we may be
4516                  * racing with tg3_poll(), so only update last_tag
4517                  * if tg3_poll() is not scheduled.
4518                  */
4519                 tp->last_tag = sblk->status_tag;
4520                 __netif_rx_schedule(dev, &tp->napi);
4521         }
4522 out:
4523         return IRQ_RETVAL(handled);
4524 }
4525
4526 /* ISR for interrupt test */
4527 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4528 {
4529         struct net_device *dev = dev_id;
4530         struct tg3 *tp = netdev_priv(dev);
4531         struct tg3_hw_status *sblk = tp->hw_status;
4532
4533         if ((sblk->status & SD_STATUS_UPDATED) ||
4534             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4535                 tg3_disable_ints(tp);
4536                 return IRQ_RETVAL(1);
4537         }
4538         return IRQ_RETVAL(0);
4539 }
4540
4541 static int tg3_init_hw(struct tg3 *, int);
4542 static int tg3_halt(struct tg3 *, int, int);
4543
4544 /* Restart hardware after configuration changes, self-test, etc.
4545  * Invoked with tp->lock held.
4546  */
4547 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4548         __releases(tp->lock)
4549         __acquires(tp->lock)
4550 {
4551         int err;
4552
4553         err = tg3_init_hw(tp, reset_phy);
4554         if (err) {
4555                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4556                        "aborting.\n", tp->dev->name);
4557                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4558                 tg3_full_unlock(tp);
4559                 del_timer_sync(&tp->timer);
4560                 tp->irq_sync = 0;
4561                 napi_enable(&tp->napi);
4562                 dev_close(tp->dev);
4563                 tg3_full_lock(tp, 0);
4564         }
4565         return err;
4566 }
4567
4568 #ifdef CONFIG_NET_POLL_CONTROLLER
4569 static void tg3_poll_controller(struct net_device *dev)
4570 {
4571         struct tg3 *tp = netdev_priv(dev);
4572
4573         tg3_interrupt(tp->pdev->irq, dev);
4574 }
4575 #endif
4576
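/* Deferred reset, scheduled from tg3_tx_timeout() and the TX-recovery path in
 * tg3_poll() via schedule_work(): stop the PHY and netif, halt and
 * re-initialize the hardware, then restart everything from process context.
 */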
4577 static void tg3_reset_task(struct work_struct *work)
4578 {
4579         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4580         int err;
4581         unsigned int restart_timer;
4582
4583         tg3_full_lock(tp, 0);
4584
4585         if (!netif_running(tp->dev)) {
4586                 tg3_full_unlock(tp);
4587                 return;
4588         }
4589
4590         tg3_full_unlock(tp);
4591
4592         tg3_phy_stop(tp);
4593
4594         tg3_netif_stop(tp);
4595
4596         tg3_full_lock(tp, 1);
4597
4598         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4599         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4600
4601         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4602                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4603                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4604                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4605                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4606         }
4607
4608         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4609         err = tg3_init_hw(tp, 1);
4610         if (err)
4611                 goto out;
4612
4613         tg3_netif_start(tp);
4614
4615         if (restart_timer)
4616                 mod_timer(&tp->timer, jiffies + 1);
4617
4618 out:
4619         tg3_full_unlock(tp);
4620
4621         if (!err)
4622                 tg3_phy_start(tp);
4623 }
4624
4625 static void tg3_dump_short_state(struct tg3 *tp)
4626 {
4627         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4628                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4629         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4630                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4631 }
4632
4633 static void tg3_tx_timeout(struct net_device *dev)
4634 {
4635         struct tg3 *tp = netdev_priv(dev);
4636
4637         if (netif_msg_tx_err(tp)) {
4638                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4639                        dev->name);
4640                 tg3_dump_short_state(tp);
4641         }
4642
4643         schedule_work(&tp->reset_task);
4644 }
4645
4646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
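/* The low 32 bits wrap when the buffer straddles a 4 GB boundary.  A purely
 * illustrative example (values not taken from the driver): base = 0xfffff000
 * and len = 0x2000 give base + len + 8 = 0x00001008, which is less than base,
 * so the test reports an overflow.  The "base > 0xffffdcc0" check is a cheap
 * pre-filter: only mappings that start within a few KB of the boundary can
 * possibly cross it.
 */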
4647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4648 {
4649         u32 base = (u32) mapping & 0xffffffff;
4650
4651         return ((base > 0xffffdcc0) &&
4652                 (base + len + 8 < base));
4653 }
4654
4655 /* Test for DMA addresses > 40-bit */
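/* Only meaningful when the platform can hand out DMA addresses above 40 bits
 * (a 64-bit kernel with highmem) and the chip is flagged with the 40-bit DMA
 * bug; in all other configurations the test compiles down to 0.
 */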
4656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4657                                           int len)
4658 {
4659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4660         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4661                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4662         return 0;
4663 #else
4664         return 0;
4665 #endif
4666 }
4667
4668 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4669
4670 /* Work around 4GB and 40-bit hardware DMA bugs. */
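/* The copy yields a fresh, linear skb whose single DMA mapping can be
 * validated against the 4 GB boundary.  On success the new mapping replaces
 * the offending one in the descriptor ring; the original skb is unmapped and
 * freed, and the software ring entries are repointed at the copy.
 */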
4671 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4672                                        u32 last_plus_one, u32 *start,
4673                                        u32 base_flags, u32 mss)
4674 {
4675         struct sk_buff *new_skb;
4676         dma_addr_t new_addr = 0;
4677         u32 entry = *start;
4678         int i, ret = 0;
4679
4680         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4681                 new_skb = skb_copy(skb, GFP_ATOMIC);
4682         else {
4683                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4684
4685                 new_skb = skb_copy_expand(skb,
4686                                           skb_headroom(skb) + more_headroom,
4687                                           skb_tailroom(skb), GFP_ATOMIC);
4688         }
4689
4690         if (!new_skb) {
4691                 ret = -1;
4692         } else {
4693                 /* New SKB is guaranteed to be linear. */
4694                 entry = *start;
4695                 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4696                 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4697
4698                 /* Make sure new skb does not cross any 4G boundaries.
4699                  * Drop the packet if it does.
4700                  */
4701                 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4702                         if (!ret)
4703                                 skb_dma_unmap(&tp->pdev->dev, new_skb,
4704                                               DMA_TO_DEVICE);
4705                         ret = -1;
4706                         dev_kfree_skb(new_skb);
4707                         new_skb = NULL;
4708                 } else {
4709                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4710                                     base_flags, 1 | (mss << 1));
4711                         *start = NEXT_TX(entry);
4712                 }
4713         }
4714
4715         /* Now clean up the sw ring entries. */
4716         i = 0;
4717         while (entry != last_plus_one) {
4718                 if (i == 0) {
4719                         tp->tx_buffers[entry].skb = new_skb;
4720                 } else {
4721                         tp->tx_buffers[entry].skb = NULL;
4722                 }
4723                 entry = NEXT_TX(entry);
4724                 i++;
4725         }
4726
4727         skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4728         dev_kfree_skb(skb);
4729
4730         return ret;
4731 }
4732
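/* Fill in one TX descriptor.  The low bit of @mss_and_is_end marks the last
 * descriptor of the frame (it sets TXD_FLAG_END) and the remaining bits carry
 * the MSS, so callers pass "(is_last) | (mss << 1)".  A VLAN tag, when
 * present, rides in the upper 16 bits of @flags.
 */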
4733 static void tg3_set_txd(struct tg3 *tp, int entry,
4734                         dma_addr_t mapping, int len, u32 flags,
4735                         u32 mss_and_is_end)
4736 {
4737         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4738         int is_end = (mss_and_is_end & 0x1);
4739         u32 mss = (mss_and_is_end >> 1);
4740         u32 vlan_tag = 0;
4741
4742         if (is_end)
4743                 flags |= TXD_FLAG_END;
4744         if (flags & TXD_FLAG_VLAN) {
4745                 vlan_tag = flags >> 16;
4746                 flags &= 0xffff;
4747         }
4748         vlan_tag |= (mss << TXD_MSS_SHIFT);
4749
4750         txd->addr_hi = ((u64) mapping >> 32);
4751         txd->addr_lo = ((u64) mapping & 0xffffffff);
4752         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4753         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4754 }
4755
4756 /* hard_start_xmit for devices that don't have any bugs and
4757  * support TG3_FLG2_HW_TSO_2 only.
4758  */
4759 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4760 {
4761         struct tg3 *tp = netdev_priv(dev);
4762         u32 len, entry, base_flags, mss;
4763         struct skb_shared_info *sp;
4764         dma_addr_t mapping;
4765
4766         len = skb_headlen(skb);
4767
4768         /* We are running in a BH-disabled context with netif_tx_lock held,
4769          * and TX reclaim runs via tp->napi.poll inside a software
4770          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4771          * no IRQ context deadlocks to worry about either.  Rejoice!
4772          */
4773         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4774                 if (!netif_queue_stopped(dev)) {
4775                         netif_stop_queue(dev);
4776
4777                         /* This is a hard error, log it. */
4778                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4779                                "queue awake!\n", dev->name);
4780                 }
4781                 return NETDEV_TX_BUSY;
4782         }
4783
4784         entry = tp->tx_prod;
4785         base_flags = 0;
4786         mss = 0;
4787         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4788                 int tcp_opt_len, ip_tcp_len;
4789
4790                 if (skb_header_cloned(skb) &&
4791                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4792                         dev_kfree_skb(skb);
4793                         goto out_unlock;
4794                 }
4795
4796                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4797                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4798                 else {
4799                         struct iphdr *iph = ip_hdr(skb);
4800
4801                         tcp_opt_len = tcp_optlen(skb);
4802                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4803
4804                         iph->check = 0;
4805                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4806                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4807                 }
4808
4809                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4810                                TXD_FLAG_CPU_POST_DMA);
4811
4812                 tcp_hdr(skb)->check = 0;
4814         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4816                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4817 #if TG3_VLAN_TAG_USED
4818         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4819                 base_flags |= (TXD_FLAG_VLAN |
4820                                (vlan_tx_tag_get(skb) << 16));
4821 #endif
4822
4823         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4824                 dev_kfree_skb(skb);
4825                 goto out_unlock;
4826         }
4827
4828         sp = skb_shinfo(skb);
4829
4830         mapping = sp->dma_maps[0];
4831
4832         tp->tx_buffers[entry].skb = skb;
4833
4834         tg3_set_txd(tp, entry, mapping, len, base_flags,
4835                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4836
4837         entry = NEXT_TX(entry);
4838
4839         /* Now loop through additional data fragments, and queue them. */
4840         if (skb_shinfo(skb)->nr_frags > 0) {
4841                 unsigned int i, last;
4842
4843                 last = skb_shinfo(skb)->nr_frags - 1;
4844                 for (i = 0; i <= last; i++) {
4845                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4846
4847                         len = frag->size;
4848                         mapping = sp->dma_maps[i + 1];
4849                         tp->tx_buffers[entry].skb = NULL;
4850
4851                         tg3_set_txd(tp, entry, mapping, len,
4852                                     base_flags, (i == last) | (mss << 1));
4853
4854                         entry = NEXT_TX(entry);
4855                 }
4856         }
4857
4858         /* Packets are ready, update Tx producer idx local and on card. */
4859         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4860
4861         tp->tx_prod = entry;
4862         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4863                 netif_stop_queue(dev);
4864                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4865                         netif_wake_queue(tp->dev);
4866         }
4867
4868 out_unlock:
4869         mmiowb();
4870
4871         dev->trans_start = jiffies;
4872
4873         return NETDEV_TX_OK;
4874 }
4875
4876 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4877
4878 /* Use GSO to work around a rare TSO bug that may be triggered when the
4879  * TSO header is greater than 80 bytes.
4880  */
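/* The "gso_segs * 3" check below is a conservative worst-case estimate of the
 * descriptors the resulting segments may need; each segment is then pushed
 * through tg3_start_xmit_dma_bug() individually.
 */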
4881 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4882 {
4883         struct sk_buff *segs, *nskb;
4884
4885         /* Estimate the number of fragments in the worst case */
4886         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4887                 netif_stop_queue(tp->dev);
4888                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4889                         return NETDEV_TX_BUSY;
4890
4891                 netif_wake_queue(tp->dev);
4892         }
4893
4894         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4895         if (IS_ERR(segs))
4896                 goto tg3_tso_bug_end;
4897
4898         do {
4899                 nskb = segs;
4900                 segs = segs->next;
4901                 nskb->next = NULL;
4902                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4903         } while (segs);
4904
4905 tg3_tso_bug_end:
4906         dev_kfree_skb(skb);
4907
4908         return NETDEV_TX_OK;
4909 }
4910
4911 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4912  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4913  */
4914 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4915 {
4916         struct tg3 *tp = netdev_priv(dev);
4917         u32 len, entry, base_flags, mss;
4918         struct skb_shared_info *sp;
4919         int would_hit_hwbug;
4920         dma_addr_t mapping;
4921
4922         len = skb_headlen(skb);
4923
4924         /* We are running in a BH-disabled context with netif_tx_lock held,
4925          * and TX reclaim runs via tp->napi.poll inside a software
4926          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4927          * no IRQ context deadlocks to worry about either.  Rejoice!
4928          */
4929         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4930                 if (!netif_queue_stopped(dev)) {
4931                         netif_stop_queue(dev);
4932
4933                         /* This is a hard error, log it. */
4934                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4935                                "queue awake!\n", dev->name);
4936                 }
4937                 return NETDEV_TX_BUSY;
4938         }
4939
4940         entry = tp->tx_prod;
4941         base_flags = 0;
4942         if (skb->ip_summed == CHECKSUM_PARTIAL)
4943                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4944         mss = 0;
4945         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4946                 struct iphdr *iph;
4947                 int tcp_opt_len, ip_tcp_len, hdr_len;
4948
4949                 if (skb_header_cloned(skb) &&
4950                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4951                         dev_kfree_skb(skb);
4952                         goto out_unlock;
4953                 }
4954
4955                 tcp_opt_len = tcp_optlen(skb);
4956                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4957
4958                 hdr_len = ip_tcp_len + tcp_opt_len;
4959                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4960                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4961                         return (tg3_tso_bug(tp, skb));
4962
4963                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4964                                TXD_FLAG_CPU_POST_DMA);
4965
4966                 iph = ip_hdr(skb);
4967                 iph->check = 0;
4968                 iph->tot_len = htons(mss + hdr_len);
4969                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4970                         tcp_hdr(skb)->check = 0;
4971                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4972                 } else
4973                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4974                                                                  iph->daddr, 0,
4975                                                                  IPPROTO_TCP,
4976                                                                  0);
4977
4978                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4979                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4980                         if (tcp_opt_len || iph->ihl > 5) {
4981                                 int tsflags;
4982
4983                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4984                                 mss |= (tsflags << 11);
4985                         }
4986                 } else {
4987                         if (tcp_opt_len || iph->ihl > 5) {
4988                                 int tsflags;
4989
4990                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4991                                 base_flags |= tsflags << 12;
4992                         }
4993                 }
4994         }
4995 #if TG3_VLAN_TAG_USED
4996         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4997                 base_flags |= (TXD_FLAG_VLAN |
4998                                (vlan_tx_tag_get(skb) << 16));
4999 #endif
5000
5001         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5002                 dev_kfree_skb(skb);
5003                 goto out_unlock;
5004         }
5005
5006         sp = skb_shinfo(skb);
5007
5008         mapping = sp->dma_maps[0];
5009
5010         tp->tx_buffers[entry].skb = skb;
5011
5012         would_hit_hwbug = 0;
5013
5014         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5015                 would_hit_hwbug = 1;
5016         else if (tg3_4g_overflow_test(mapping, len))
5017                 would_hit_hwbug = 1;
5018
5019         tg3_set_txd(tp, entry, mapping, len, base_flags,
5020                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5021
5022         entry = NEXT_TX(entry);
5023
5024         /* Now loop through additional data fragments, and queue them. */
5025         if (skb_shinfo(skb)->nr_frags > 0) {
5026                 unsigned int i, last;
5027
5028                 last = skb_shinfo(skb)->nr_frags - 1;
5029                 for (i = 0; i <= last; i++) {
5030                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5031
5032                         len = frag->size;
5033                         mapping = sp->dma_maps[i + 1];
5034
5035                         tp->tx_buffers[entry].skb = NULL;
5036
5037                         if (tg3_4g_overflow_test(mapping, len))
5038                                 would_hit_hwbug = 1;
5039
5040                         if (tg3_40bit_overflow_test(tp, mapping, len))
5041                                 would_hit_hwbug = 1;
5042
5043                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5044                                 tg3_set_txd(tp, entry, mapping, len,
5045                                             base_flags, (i == last)|(mss << 1));
5046                         else
5047                                 tg3_set_txd(tp, entry, mapping, len,
5048                                             base_flags, (i == last));
5049
5050                         entry = NEXT_TX(entry);
5051                 }
5052         }
5053
5054         if (would_hit_hwbug) {
5055                 u32 last_plus_one = entry;
5056                 u32 start;
5057
5058                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5059                 start &= (TG3_TX_RING_SIZE - 1);
5060
5061                 /* If the workaround fails due to memory/mapping
5062                  * failure, silently drop this packet.
5063                  */
5064                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5065                                                 &start, base_flags, mss))
5066                         goto out_unlock;
5067
5068                 entry = start;
5069         }
5070
5071         /* Packets are ready, update Tx producer idx local and on card. */
5072         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5073
5074         tp->tx_prod = entry;
5075         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5076                 netif_stop_queue(dev);
5077                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5078                         netif_wake_queue(tp->dev);
5079         }
5080
5081 out_unlock:
5082         mmiowb();
5083
5084         dev->trans_start = jiffies;
5085
5086         return NETDEV_TX_OK;
5087 }
5088
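/* Propagate an MTU change into the driver flags: jumbo MTUs enable the jumbo
 * RX ring, except on 5780-class parts, where TSO is disabled instead and
 * jumbo frames are carried in the standard ring with larger buffers (see
 * tg3_init_rings()).
 */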
5089 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5090                                int new_mtu)
5091 {
5092         dev->mtu = new_mtu;
5093
5094         if (new_mtu > ETH_DATA_LEN) {
5095                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5096                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5097                         ethtool_op_set_tso(dev, 0);
5098                 } else
5100                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5101         } else {
5102                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5103                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5104                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5105         }
5106 }
5107
5108 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5109 {
5110         struct tg3 *tp = netdev_priv(dev);
5111         int err;
5112
5113         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5114                 return -EINVAL;
5115
5116         if (!netif_running(dev)) {
5117                 /* We'll just catch it later when the
5118                  * device is brought up.
5119                  */
5120                 tg3_set_mtu(dev, tp, new_mtu);
5121                 return 0;
5122         }
5123
5124         tg3_phy_stop(tp);
5125
5126         tg3_netif_stop(tp);
5127
5128         tg3_full_lock(tp, 1);
5129
5130         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5131
5132         tg3_set_mtu(dev, tp, new_mtu);
5133
5134         err = tg3_restart_hw(tp, 0);
5135
5136         if (!err)
5137                 tg3_netif_start(tp);
5138
5139         tg3_full_unlock(tp);
5140
5141         if (!err)
5142                 tg3_phy_start(tp);
5143
5144         return err;
5145 }
5146
5147 /* Free up pending packets in all rx/tx rings.
5148  *
5149  * The chip has been shut down and the driver detached from
5150  * the networking, so no interrupts or new tx packets will
5151  * end up in the driver.  tp->{tx,}lock is not held and we are not
5152  * in an interrupt context and thus may sleep.
5153  */
5154 static void tg3_free_rings(struct tg3 *tp)
5155 {
5156         struct ring_info *rxp;
5157         int i;
5158
5159         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5160                 rxp = &tp->rx_std_buffers[i];
5161
5162                 if (rxp->skb == NULL)
5163                         continue;
5164                 pci_unmap_single(tp->pdev,
5165                                  pci_unmap_addr(rxp, mapping),
5166                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5167                                  PCI_DMA_FROMDEVICE);
5168                 dev_kfree_skb_any(rxp->skb);
5169                 rxp->skb = NULL;
5170         }
5171
5172         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5173                 rxp = &tp->rx_jumbo_buffers[i];
5174
5175                 if (rxp->skb == NULL)
5176                         continue;
5177                 pci_unmap_single(tp->pdev,
5178                                  pci_unmap_addr(rxp, mapping),
5179                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5180                                  PCI_DMA_FROMDEVICE);
5181                 dev_kfree_skb_any(rxp->skb);
5182                 rxp->skb = NULL;
5183         }
5184
5185         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5186                 struct tx_ring_info *txp;
5187                 struct sk_buff *skb;
5188
5189                 txp = &tp->tx_buffers[i];
5190                 skb = txp->skb;
5191
5192                 if (skb == NULL) {
5193                         i++;
5194                         continue;
5195                 }
5196
5197                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5198
5199                 txp->skb = NULL;
5200
5201                 i += skb_shinfo(skb)->nr_frags + 1;
5202
5203                 dev_kfree_skb_any(skb);
5204         }
5205 }
5206
5207 /* Initialize tx/rx rings for packet processing.
5208  *
5209  * The chip has been shut down and the driver detached from
5210  * the networking core, so no interrupts or new tx packets will
5211  * end up in the driver.  tp->{tx,}lock are held and thus
5212  * we may not sleep.
5213  */
5214 static int tg3_init_rings(struct tg3 *tp)
5215 {
5216         u32 i;
5217
5218         /* Free up all the SKBs. */
5219         tg3_free_rings(tp);
5220
5221         /* Zero out all descriptors. */
5222         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5223         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5224         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5225         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5226
5227         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5228         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5229             (tp->dev->mtu > ETH_DATA_LEN))
5230                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5231
5232         /* Initialize the invariants of the rings; we only set this
5233          * stuff once.  This works because the card does not
5234          * write into the rx buffer posting rings.
5235          */
5236         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5237                 struct tg3_rx_buffer_desc *rxd;
5238
5239                 rxd = &tp->rx_std[i];
5240                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5241                         << RXD_LEN_SHIFT;
5242                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5243                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5244                                (i << RXD_OPAQUE_INDEX_SHIFT));
5245         }
5246
5247         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5248                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5249                         struct tg3_rx_buffer_desc *rxd;
5250
5251                         rxd = &tp->rx_jumbo[i];
5252                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5253                                 << RXD_LEN_SHIFT;
5254                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5255                                 RXD_FLAG_JUMBO;
5256                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5257                                (i << RXD_OPAQUE_INDEX_SHIFT));
5258                 }
5259         }
5260
5261         /* Now allocate fresh SKBs for each rx ring. */
5262         for (i = 0; i < tp->rx_pending; i++) {
5263                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5264                         printk(KERN_WARNING PFX
5265                                "%s: Using a smaller RX standard ring, "
5266                                "only %d out of %d buffers were allocated "
5267                                "successfully.\n",
5268                                tp->dev->name, i, tp->rx_pending);
5269                         if (i == 0)
5270                                 return -ENOMEM;
5271                         tp->rx_pending = i;
5272                         break;
5273                 }
5274         }
5275
5276         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5277                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5278                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5279                                              -1, i) < 0) {
5280                                 printk(KERN_WARNING PFX
5281                                        "%s: Using a smaller RX jumbo ring, "
5282                                        "only %d out of %d buffers were "
5283                                        "allocated successfully.\n",
5284                                        tp->dev->name, i, tp->rx_jumbo_pending);
5285                                 if (i == 0) {
5286                                         tg3_free_rings(tp);
5287                                         return -ENOMEM;
5288                                 }
5289                                 tp->rx_jumbo_pending = i;
5290                                 break;
5291                         }
5292                 }
5293         }
5294         return 0;
5295 }
5296
5297 /*
5298  * Must not be invoked with interrupt sources disabled and
5299  * the hardware shut down.
5300  */
5301 static void tg3_free_consistent(struct tg3 *tp)
5302 {
5303         kfree(tp->rx_std_buffers);
5304         tp->rx_std_buffers = NULL;
5305         if (tp->rx_std) {
5306                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5307                                     tp->rx_std, tp->rx_std_mapping);
5308                 tp->rx_std = NULL;
5309         }
5310         if (tp->rx_jumbo) {
5311                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5312                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5313                 tp->rx_jumbo = NULL;
5314         }
5315         if (tp->rx_rcb) {
5316                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5317                                     tp->rx_rcb, tp->rx_rcb_mapping);
5318                 tp->rx_rcb = NULL;
5319         }
5320         if (tp->tx_ring) {
5321                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5322                         tp->tx_ring, tp->tx_desc_mapping);
5323                 tp->tx_ring = NULL;
5324         }
5325         if (tp->hw_status) {
5326                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5327                                     tp->hw_status, tp->status_mapping);
5328                 tp->hw_status = NULL;
5329         }
5330         if (tp->hw_stats) {
5331                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5332                                     tp->hw_stats, tp->stats_mapping);
5333                 tp->hw_stats = NULL;
5334         }
5335 }
5336
5337 /*
5338  * Must not be invoked with interrupt sources disabled and
5339  * the hardware shut down.  Can sleep.
5340  */
5341 static int tg3_alloc_consistent(struct tg3 *tp)
5342 {
5343         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5344                                       (TG3_RX_RING_SIZE +
5345                                        TG3_RX_JUMBO_RING_SIZE)) +
5346                                      (sizeof(struct tx_ring_info) *
5347                                       TG3_TX_RING_SIZE),
5348                                      GFP_KERNEL);
5349         if (!tp->rx_std_buffers)
5350                 return -ENOMEM;
5351
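        /* The ring_info bookkeeping for the standard RX ring, the jumbo RX
         * ring and the TX ring all live in the single allocation above; carve
         * it into the three arrays.
         */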
5352         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5353         tp->tx_buffers = (struct tx_ring_info *)
5354                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5355
5356         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5357                                           &tp->rx_std_mapping);
5358         if (!tp->rx_std)
5359                 goto err_out;
5360
5361         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5362                                             &tp->rx_jumbo_mapping);
5363
5364         if (!tp->rx_jumbo)
5365                 goto err_out;
5366
5367         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5368                                           &tp->rx_rcb_mapping);
5369         if (!tp->rx_rcb)
5370                 goto err_out;
5371
5372         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5373                                            &tp->tx_desc_mapping);
5374         if (!tp->tx_ring)
5375                 goto err_out;
5376
5377         tp->hw_status = pci_alloc_consistent(tp->pdev,
5378                                              TG3_HW_STATUS_SIZE,
5379                                              &tp->status_mapping);
5380         if (!tp->hw_status)
5381                 goto err_out;
5382
5383         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5384                                             sizeof(struct tg3_hw_stats),
5385                                             &tp->stats_mapping);
5386         if (!tp->hw_stats)
5387                 goto err_out;
5388
5389         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5390         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5391
5392         return 0;
5393
5394 err_out:
5395         tg3_free_consistent(tp);
5396         return -ENOMEM;
5397 }
5398
5399 #define MAX_WAIT_CNT 1000
5400
5401 /* To stop a block, clear the enable bit and poll till it
5402  * clears.  tp->lock is held.
5403  */
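/* The poll below runs every 100 us for at most MAX_WAIT_CNT (1000)
 * iterations, i.e. the block gets roughly 100 ms to report itself stopped.
 */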
5404 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5405 {
5406         unsigned int i;
5407         u32 val;
5408
5409         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5410                 switch (ofs) {
5411                 case RCVLSC_MODE:
5412                 case DMAC_MODE:
5413                 case MBFREE_MODE:
5414                 case BUFMGR_MODE:
5415                 case MEMARB_MODE:
5416                         /* We can't enable/disable these bits of the
5417                          * 5705/5750, just say success.
5418                          */
5419                         return 0;
5420
5421                 default:
5422                         break;
5423                 }
5424         }
5425
5426         val = tr32(ofs);
5427         val &= ~enable_bit;
5428         tw32_f(ofs, val);
5429
5430         for (i = 0; i < MAX_WAIT_CNT; i++) {
5431                 udelay(100);
5432                 val = tr32(ofs);
5433                 if ((val & enable_bit) == 0)
5434                         break;
5435         }
5436
5437         if (i == MAX_WAIT_CNT && !silent) {
5438                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5439                        "ofs=%lx enable_bit=%x\n",
5440                        ofs, enable_bit);
5441                 return -ENODEV;
5442         }
5443
5444         return 0;
5445 }
5446
5447 /* tp->lock is held. */
5448 static int tg3_abort_hw(struct tg3 *tp, int silent)
5449 {
5450         int i, err;
5451
5452         tg3_disable_ints(tp);
5453
5454         tp->rx_mode &= ~RX_MODE_ENABLE;
5455         tw32_f(MAC_RX_MODE, tp->rx_mode);
5456         udelay(10);
5457
5458         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5459         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5460         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5461         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5462         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5463         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5464
5465         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5466         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5467         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5468         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5469         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5470         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5471         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5472
5473         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5474         tw32_f(MAC_MODE, tp->mac_mode);
5475         udelay(40);
5476
5477         tp->tx_mode &= ~TX_MODE_ENABLE;
5478         tw32_f(MAC_TX_MODE, tp->tx_mode);
5479
5480         for (i = 0; i < MAX_WAIT_CNT; i++) {
5481                 udelay(100);
5482                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5483                         break;
5484         }
5485         if (i >= MAX_WAIT_CNT) {
5486                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5487                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5488                        tp->dev->name, tr32(MAC_TX_MODE));
5489                 err |= -ENODEV;
5490         }
5491
5492         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5493         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5494         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5495
5496         tw32(FTQ_RESET, 0xffffffff);
5497         tw32(FTQ_RESET, 0x00000000);
5498
5499         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5500         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5501
5502         if (tp->hw_status)
5503                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5504         if (tp->hw_stats)
5505                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5506
5507         return err;
5508 }
5509
5510 /* tp->lock is held. */
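/* Acquire NVRAM access through the chip's software arbitration register:
 * assert our request bit and poll for the matching grant for up to roughly
 * 160 ms (8000 iterations of 20 us).  The lock nests via nvram_lock_cnt and
 * is only taken on devices with real NVRAM (TG3_FLAG_NVRAM).
 */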
5511 static int tg3_nvram_lock(struct tg3 *tp)
5512 {
5513         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5514                 int i;
5515
5516                 if (tp->nvram_lock_cnt == 0) {
5517                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5518                         for (i = 0; i < 8000; i++) {
5519                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5520                                         break;
5521                                 udelay(20);
5522                         }
5523                         if (i == 8000) {
5524                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5525                                 return -ENODEV;
5526                         }
5527                 }
5528                 tp->nvram_lock_cnt++;
5529         }
5530         return 0;
5531 }
5532
5533 /* tp->lock is held. */
5534 static void tg3_nvram_unlock(struct tg3 *tp)
5535 {
5536         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5537                 if (tp->nvram_lock_cnt > 0)
5538                         tp->nvram_lock_cnt--;
5539                 if (tp->nvram_lock_cnt == 0)
5540                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5541         }
5542 }
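
/* Usage sketch, not compiled: when TG3_FLAG_NVRAM is set, the lock above
 * nests via tp->nvram_lock_cnt, so a path that may already hold it can
 * take it again cheaply; the hardware SWARB semaphore is only dropped
 * when the count returns to zero.  tg3_nvram_lock_nesting_sketch() is a
 * hypothetical illustration, not part of the driver.
 */
#if 0
static int tg3_nvram_lock_nesting_sketch(struct tg3 *tp)
{
	int err;

	err = tg3_nvram_lock(tp);	/* count 0 -> 1, wins SWARB_GNT1    */
	if (err)
		return err;

	err = tg3_nvram_lock(tp);	/* count 1 -> 2, no hardware access */
	if (!err)
		tg3_nvram_unlock(tp);	/* count 2 -> 1, SWARB still held   */

	tg3_nvram_unlock(tp);		/* count 1 -> 0, SWARB released     */
	return err;
}
#endif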
5543
5544 /* tp->lock is held. */
5545 static void tg3_enable_nvram_access(struct tg3 *tp)
5546 {
5547         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5548             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5549                 u32 nvaccess = tr32(NVRAM_ACCESS);
5550
5551                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5552         }
5553 }
5554
5555 /* tp->lock is held. */
5556 static void tg3_disable_nvram_access(struct tg3 *tp)
5557 {
5558         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5559             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5560                 u32 nvaccess = tr32(NVRAM_ACCESS);
5561
5562                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5563         }
5564 }
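
/* Usage sketch, not compiled: a typical NVRAM access brackets the actual
 * transfer with the arbitration lock and the access-enable helpers above,
 * in this order.  tg3_nvram_issue_read() is a hypothetical stand-in for
 * whatever routine actually drives the NVRAM command registers.
 */
#if 0
static int tg3_nvram_access_sketch(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	ret = tg3_nvram_lock(tp);		/* win the SWARB semaphore */
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);		/* sets ACCESS_ENABLE on 5750+ */
	ret = tg3_nvram_issue_read(tp, offset, val);	/* hypothetical */
	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);
	return ret;
}
#endif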
5565
5566 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5567 {
5568         int i;
5569         u32 apedata;
5570
5571         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5572         if (apedata != APE_SEG_SIG_MAGIC)
5573                 return;
5574
5575         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5576         if (!(apedata & APE_FW_STATUS_READY))
5577                 return;
5578
5579         /* Wait for up to 1 millisecond for APE to service previous event. */
5580         for (i = 0; i < 10; i++) {
5581                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5582                         return;
5583
5584                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5585
5586                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5587                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5588                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5589
5590                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5591
5592                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5593                         break;
5594
5595                 udelay(100);
5596         }
5597
5598         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5599                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5600 }
5601
5602 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5603 {
5604         u32 event;
5605         u32 apedata;
5606
5607         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5608                 return;
5609
5610         switch (kind) {
5611                 case RESET_KIND_INIT:
5612                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5613                                         APE_HOST_SEG_SIG_MAGIC);
5614                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5615                                         APE_HOST_SEG_LEN_MAGIC);
5616                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5617                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5618                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5619                                         APE_HOST_DRIVER_ID_MAGIC);
5620                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5621                                         APE_HOST_BEHAV_NO_PHYLOCK);
5622
5623                         event = APE_EVENT_STATUS_STATE_START;
5624                         break;
5625                 case RESET_KIND_SHUTDOWN:
5626                         /* With the interface we are currently using,
5627                          * APE does not track driver state.  Wiping
5628                          * out the HOST SEGMENT SIGNATURE forces
5629                          * the APE to assume OS absent status.
5630                          */
5631                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5632
5633                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5634                         break;
5635                 case RESET_KIND_SUSPEND:
5636                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5637                         break;
5638                 default:
5639                         return;
5640         }
5641
5642         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5643
5644         tg3_ape_send_event(tp, event);
5645 }
5646
5647 /* tp->lock is held. */
5648 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5649 {
5650         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5651                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5652
5653         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5654                 switch (kind) {
5655                 case RESET_KIND_INIT:
5656                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5657                                       DRV_STATE_START);
5658                         break;
5659
5660                 case RESET_KIND_SHUTDOWN:
5661                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5662                                       DRV_STATE_UNLOAD);
5663                         break;
5664
5665                 case RESET_KIND_SUSPEND:
5666                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5667                                       DRV_STATE_SUSPEND);
5668                         break;
5669
5670                 default:
5671                         break;
5672                 }
5673         }
5674
5675         if (kind == RESET_KIND_INIT ||
5676             kind == RESET_KIND_SUSPEND)
5677                 tg3_ape_driver_state_change(tp, kind);
5678 }
5679
5680 /* tp->lock is held. */
5681 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5682 {
5683         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5684                 switch (kind) {
5685                 case RESET_KIND_INIT:
5686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5687                                       DRV_STATE_START_DONE);
5688                         break;
5689
5690                 case RESET_KIND_SHUTDOWN:
5691                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5692                                       DRV_STATE_UNLOAD_DONE);
5693                         break;
5694
5695                 default:
5696                         break;
5697                 }
5698         }
5699
5700         if (kind == RESET_KIND_SHUTDOWN)
5701                 tg3_ape_driver_state_change(tp, kind);
5702 }
5703
5704 /* tp->lock is held. */
5705 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5706 {
5707         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5708                 switch (kind) {
5709                 case RESET_KIND_INIT:
5710                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5711                                       DRV_STATE_START);
5712                         break;
5713
5714                 case RESET_KIND_SHUTDOWN:
5715                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5716                                       DRV_STATE_UNLOAD);
5717                         break;
5718
5719                 case RESET_KIND_SUSPEND:
5720                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5721                                       DRV_STATE_SUSPEND);
5722                         break;
5723
5724                 default:
5725                         break;
5726                 }
5727         }
5728 }
5729
5730 static int tg3_poll_fw(struct tg3 *tp)
5731 {
5732         int i;
5733         u32 val;
5734
5735         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5736                 /* Wait up to 20ms for init done. */
5737                 for (i = 0; i < 200; i++) {
5738                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5739                                 return 0;
5740                         udelay(100);
5741                 }
5742                 return -ENODEV;
5743         }
5744
5745         /* Wait for firmware initialization to complete. */
5746         for (i = 0; i < 100000; i++) {
5747                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5748                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5749                         break;
5750                 udelay(10);
5751         }
5752
5753         /* Chip might not be fitted with firmware.  Some Sun onboard
5754          * parts are configured like that.  So don't signal the timeout
5755          * of the above loop as an error, but do report the lack of
5756          * running firmware once.
5757          */
5758         if (i >= 100000 &&
5759             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5760                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5761
5762                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5763                        tp->dev->name);
5764         }
5765
5766         return 0;
5767 }
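
/* Handshake sketch, not compiled: tg3_write_sig_pre_reset() above puts
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 into the firmware mailbox before the chip
 * is reset, and bootcode is expected to write back the one's complement
 * of that magic once its own initialization completes; tg3_poll_fw()
 * above is the polling half.  The function below is a hypothetical
 * illustration of the driver's side only.
 */
#if 0
static void tg3_fw_handshake_sketch(struct tg3 *tp)
{
	u32 val;

	/* Before the reset (done by tg3_write_sig_pre_reset()): */
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	/* ... chip reset happens, bootcode runs ... */

	/* After the reset (done by tg3_poll_fw()): */
	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
	if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) {
		/* bootcode finished initializing */
	}
}
#endif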
5768
5769 /* Save PCI command register before chip reset */
5770 static void tg3_save_pci_state(struct tg3 *tp)
5771 {
5772         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5773 }
5774
5775 /* Restore PCI state after chip reset */
5776 static void tg3_restore_pci_state(struct tg3 *tp)
5777 {
5778         u32 val;
5779
5780         /* Re-enable indirect register accesses. */
5781         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5782                                tp->misc_host_ctrl);
5783
5784         /* Set MAX PCI retry to zero. */
5785         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5786         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5787             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5788                 val |= PCISTATE_RETRY_SAME_DMA;
5789         /* Allow reads and writes to the APE register and memory space. */
5790         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5791                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5792                        PCISTATE_ALLOW_APE_SHMEM_WR;
5793         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5794
5795         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5796
5797         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5798                 pcie_set_readrq(tp->pdev, 4096);
5799         else {
5800                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5801                                       tp->pci_cacheline_sz);
5802                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5803                                       tp->pci_lat_timer);
5804         }
5805
5806         /* Make sure PCI-X relaxed ordering bit is clear. */
5807         if (tp->pcix_cap) {
5808                 u16 pcix_cmd;
5809
5810                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5811                                      &pcix_cmd);
5812                 pcix_cmd &= ~PCI_X_CMD_ERO;
5813                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5814                                       pcix_cmd);
5815         }
5816
5817         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5818
5819                 /* Chip reset on 5780 will reset MSI enable bit,
5820                  * so we need to restore it.
5821                  */
5822                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5823                         u16 ctrl;
5824
5825                         pci_read_config_word(tp->pdev,
5826                                              tp->msi_cap + PCI_MSI_FLAGS,
5827                                              &ctrl);
5828                         pci_write_config_word(tp->pdev,
5829                                               tp->msi_cap + PCI_MSI_FLAGS,
5830                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5831                         val = tr32(MSGINT_MODE);
5832                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5833                 }
5834         }
5835 }
5836
5837 static void tg3_stop_fw(struct tg3 *);
5838
5839 /* tp->lock is held. */
5840 static int tg3_chip_reset(struct tg3 *tp)
5841 {
5842         u32 val;
5843         void (*write_op)(struct tg3 *, u32, u32);
5844         int err;
5845
5846         tg3_nvram_lock(tp);
5847
5848         tg3_mdio_stop(tp);
5849
5850         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5851
5852         /* No matching tg3_nvram_unlock() after this because
5853          * chip reset below will undo the nvram lock.
5854          */
5855         tp->nvram_lock_cnt = 0;
5856
5857         /* GRC_MISC_CFG core clock reset will clear the memory
5858          * enable bit in PCI register 4 and the MSI enable bit
5859          * on some chips, so we save relevant registers here.
5860          */
5861         tg3_save_pci_state(tp);
5862
5863         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5864             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5865             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5866             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5867             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5868             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5869                 tw32(GRC_FASTBOOT_PC, 0);
5870
5871         /*
5872          * We must avoid the readl() that normally takes place.
5873          * It locks up machines, causes machine checks, and does
5874          * other fun things.  So temporarily disable the 5701
5875          * hardware workaround while we do the reset.
5876          */
5877         write_op = tp->write32;
5878         if (write_op == tg3_write_flush_reg32)
5879                 tp->write32 = tg3_write32;
5880
5881         /* Prevent the irq handler from reading or writing PCI registers
5882          * during chip reset when the memory enable bit in the PCI command
5883          * register may be cleared.  The chip does not generate interrupts
5884          * at this time, but the irq handler may still be called due to irq
5885          * sharing or irqpoll (see the sketch after this function).
5886          */
5887         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5888         if (tp->hw_status) {
5889                 tp->hw_status->status = 0;
5890                 tp->hw_status->status_tag = 0;
5891         }
5892         tp->last_tag = 0;
5893         smp_mb();
5894         synchronize_irq(tp->pdev->irq);
5895
5896         /* do the reset */
5897         val = GRC_MISC_CFG_CORECLK_RESET;
5898
5899         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5900                 if (tr32(0x7e2c) == 0x60) {
5901                         tw32(0x7e2c, 0x20);
5902                 }
5903                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5904                         tw32(GRC_MISC_CFG, (1 << 29));
5905                         val |= (1 << 29);
5906                 }
5907         }
5908
5909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5910                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5911                 tw32(GRC_VCPU_EXT_CTRL,
5912                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5913         }
5914
5915         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5916                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5917         tw32(GRC_MISC_CFG, val);
5918
5919         /* restore 5701 hardware bug workaround write method */
5920         tp->write32 = write_op;
5921
5922         /* Unfortunately, we have to delay before the PCI read back.
5923          * Some 575X chips will not even respond to a PCI cfg access
5924          * when the reset command is given to the chip.
5925          *
5926          * How do these hardware designers expect things to work
5927          * properly if the PCI write is posted for a long period
5928          * of time?  It is always necessary to have some method by
5929          * which a register read back can occur to push out the
5930          * write that does the reset.
5931          *
5932          * For most tg3 variants the trick below works.
5933          * Ho hum...
5934          */
5935         udelay(120);
5936
5937         /* Flush PCI posted writes.  The normal MMIO registers
5938          * are inaccessible at this time so this is the only
5939          * way to do this reliably (actually, this is no longer
5940          * the case, see above).  I tried to use indirect
5941          * register read/write but this upset some 5701 variants.
5942          */
5943         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5944
5945         udelay(120);
5946
5947         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5948                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5949                         int i;
5950                         u32 cfg_val;
5951
5952                         /* Wait for link training to complete.  */
5953                         for (i = 0; i < 5000; i++)
5954                                 udelay(100);
5955
5956                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5957                         pci_write_config_dword(tp->pdev, 0xc4,
5958                                                cfg_val | (1 << 15));
5959                 }
5960                 /* Set PCIE max payload size and clear error status.  */
5961                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5962         }
5963
5964         tg3_restore_pci_state(tp);
5965
5966         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5967
5968         val = 0;
5969         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5970                 val = tr32(MEMARB_MODE);
5971         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5972
5973         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5974                 tg3_stop_fw(tp);
5975                 tw32(0x5000, 0x400);
5976         }
5977
5978         tw32(GRC_MODE, tp->grc_mode);
5979
5980         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5981                 val = tr32(0xc4);
5982
5983                 tw32(0xc4, val | (1 << 15));
5984         }
5985
5986         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5987             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5988                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5989                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5990                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5991                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5992         }
5993
5994         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5995                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5996                 tw32_f(MAC_MODE, tp->mac_mode);
5997         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5998                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5999                 tw32_f(MAC_MODE, tp->mac_mode);
6000         } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6001                 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6002                 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6003                         tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6004                 tw32_f(MAC_MODE, tp->mac_mode);
6005         } else
6006                 tw32_f(MAC_MODE, 0);
6007         udelay(40);
6008
6009         tg3_mdio_start(tp);
6010
6011         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6012
6013         err = tg3_poll_fw(tp);
6014         if (err)
6015                 return err;
6016
6017         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6018             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6019                 val = tr32(0x7c00);
6020
6021                 tw32(0x7c00, val | (1 << 25));
6022         }
6023
6024         /* Reprobe ASF enable state.  */
6025         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6026         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6027         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6028         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6029                 u32 nic_cfg;
6030
6031                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6032                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6033                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6034                         tp->last_event_jiffies = jiffies;
6035                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6036                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6037                 }
6038         }
6039
6040         return 0;
6041 }
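
/* Sketch, not compiled: while TG3_FLAG_CHIP_RESETTING is set inside
 * tg3_chip_reset() above, a shared or polled interrupt can still invoke
 * the driver's handler even though the chip raises no interrupt of its
 * own, so the handler must bail out before touching any register.  This
 * is a hypothetical illustration of that check only; the real handlers
 * use the driver's own synchronization helpers.
 */
#if 0
static irqreturn_t tg3_isr_resetting_sketch(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Don't touch PCI config or MMIO space during a chip reset; the
	 * memory enable bit in PCI_COMMAND may be cleared at this point.
	 */
	if (tp->tg3_flags & TG3_FLAG_CHIP_RESETTING)
		return IRQ_NONE;

	/* ... normal interrupt processing would follow here ... */
	return IRQ_HANDLED;
}
#endif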
6042
6043 /* tp->lock is held. */
6044 static void tg3_stop_fw(struct tg3 *tp)
6045 {
6046         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6047            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6048                 /* Wait for RX cpu to ACK the previous event. */
6049                 tg3_wait_for_event_ack(tp);
6050
6051                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6052
6053                 tg3_generate_fw_event(tp);
6054
6055                 /* Wait for RX cpu to ACK this event. */
6056                 tg3_wait_for_event_ack(tp);
6057         }
6058 }
6059
6060 /* tp->lock is held. */
6061 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6062 {
6063         int err;
6064
6065         tg3_stop_fw(tp);
6066
6067         tg3_write_sig_pre_reset(tp, kind);
6068
6069         tg3_abort_hw(tp, silent);
6070         err = tg3_chip_reset(tp);
6071
6072         tg3_write_sig_legacy(tp, kind);
6073         tg3_write_sig_post_reset(tp, kind);
6074
6075         if (err)
6076                 return err;
6077
6078         return 0;
6079 }
6080
6081 #define TG3_FW_RELEASE_MAJOR    0x0
6082 #define TG3_FW_RELEASE_MINOR    0x0
6083 #define TG3_FW_RELEASE_FIX      0x0
6084 #define TG3_FW_START_ADDR       0x08000000
6085 #define TG3_FW_TEXT_ADDR        0x08000000
6086 #define TG3_FW_TEXT_LEN         0x9c0
6087 #define TG3_FW_RODATA_ADDR      0x080009c0
6088 #define TG3_FW_RODATA_LEN       0x60
6089 #define TG3_FW_DATA_ADDR        0x08000a40
6090 #define TG3_FW_DATA_LEN         0x20
6091 #define TG3_FW_SBSS_ADDR        0x08000a60
6092 #define TG3_FW_SBSS_LEN         0xc
6093 #define TG3_FW_BSS_ADDR         0x08000a70
6094 #define TG3_FW_BSS_LEN          0x10
6095
6096 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6097         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6098         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6099         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6100         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6101         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6102         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6103         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6104         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6105         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6106         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6107         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6108         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6109         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6110         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6111         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6112         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6113         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6114         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6115         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6116         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6117         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6118         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6119         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6120         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6121         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6122         0, 0, 0, 0, 0, 0,
6123         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6124         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6125         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6126         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6127         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6128         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6129         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6130         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6131         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6132         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6133         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6134         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6135         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6136         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6137         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6138         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6139         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6140         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6141         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6142         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6143         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6144         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6145         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6146         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6147         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6148         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6149         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6150         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6151         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6152         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6153         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6154         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6155         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6156         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6157         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6158         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6159         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6160         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6161         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6162         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6163         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6164         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6165         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6166         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6167         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6168         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6169         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6170         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6171         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6172         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6173         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6174         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6175         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6176         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6177         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6178         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6179         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6180         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6181         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6182         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6183         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6184         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6185         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6186         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6187         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6188 };
6189
6190 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6191         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6192         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6193         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6194         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6195         0x00000000
6196 };
6197
6198 #if 0 /* All zeros, don't eat up space with it. */
6199 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6200         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6201         0x00000000, 0x00000000, 0x00000000, 0x00000000
6202 };
6203 #endif
6204
6205 #define RX_CPU_SCRATCH_BASE     0x30000
6206 #define RX_CPU_SCRATCH_SIZE     0x04000
6207 #define TX_CPU_SCRATCH_BASE     0x34000
6208 #define TX_CPU_SCRATCH_SIZE     0x04000
6209
6210 /* tp->lock is held. */
6211 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6212 {
6213         int i;
6214
6215         BUG_ON(offset == TX_CPU_BASE &&
6216             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6217
6218         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6219                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6220
6221                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6222                 return 0;
6223         }
6224         if (offset == RX_CPU_BASE) {
6225                 for (i = 0; i < 10000; i++) {
6226                         tw32(offset + CPU_STATE, 0xffffffff);
6227                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6228                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6229                                 break;
6230                 }
6231
6232                 tw32(offset + CPU_STATE, 0xffffffff);
6233                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6234                 udelay(10);
6235         } else {
6236                 for (i = 0; i < 10000; i++) {
6237                         tw32(offset + CPU_STATE, 0xffffffff);
6238                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6239                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6240                                 break;
6241                 }
6242         }
6243
6244         if (i >= 10000) {
6245                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6246                        "and %s CPU\n",
6247                        tp->dev->name,
6248                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6249                 return -ENODEV;
6250         }
6251
6252         /* Clear firmware's nvram arbitration. */
6253         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6254                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6255         return 0;
6256 }
6257
6258 struct fw_info {
6259         unsigned int text_base;
6260         unsigned int text_len;
6261         const u32 *text_data;
6262         unsigned int rodata_base;
6263         unsigned int rodata_len;
6264         const u32 *rodata_data;
6265         unsigned int data_base;
6266         unsigned int data_len;
6267         const u32 *data_data;
6268 };
6269
6270 /* tp->lock is held. */
6271 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6272                                  int cpu_scratch_size, struct fw_info *info)
6273 {
6274         int err, lock_err, i;
6275         void (*write_op)(struct tg3 *, u32, u32);
6276
6277         if (cpu_base == TX_CPU_BASE &&
6278             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6279                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6280                        "TX cpu firmware on %s which is 5705.\n",
6281                        tp->dev->name);
6282                 return -EINVAL;
6283         }
6284
6285         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6286                 write_op = tg3_write_mem;
6287         else
6288                 write_op = tg3_write_indirect_reg32;
6289
6290         /* It is possible that bootcode is still loading at this point.
6291          * Get the nvram lock before halting the cpu.
6292          */
6293         lock_err = tg3_nvram_lock(tp);
6294         err = tg3_halt_cpu(tp, cpu_base);
6295         if (!lock_err)
6296                 tg3_nvram_unlock(tp);
6297         if (err)
6298                 goto out;
6299
6300         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6301                 write_op(tp, cpu_scratch_base + i, 0);
6302         tw32(cpu_base + CPU_STATE, 0xffffffff);
6303         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6304         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6305                 write_op(tp, (cpu_scratch_base +
6306                               (info->text_base & 0xffff) +
6307                               (i * sizeof(u32))),
6308                          (info->text_data ?
6309                           info->text_data[i] : 0));
6310         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6311                 write_op(tp, (cpu_scratch_base +
6312                               (info->rodata_base & 0xffff) +
6313                               (i * sizeof(u32))),
6314                          (info->rodata_data ?
6315                           info->rodata_data[i] : 0));
6316         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6317                 write_op(tp, (cpu_scratch_base +
6318                               (info->data_base & 0xffff) +
6319                               (i * sizeof(u32))),
6320                          (info->data_data ?
6321                           info->data_data[i] : 0));
6322
6323         err = 0;
6324
6325 out:
6326         return err;
6327 }
6328
6329 /* tp->lock is held. */
6330 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6331 {
6332         struct fw_info info;
6333         int err, i;
6334
6335         info.text_base = TG3_FW_TEXT_ADDR;
6336         info.text_len = TG3_FW_TEXT_LEN;
6337         info.text_data = &tg3FwText[0];
6338         info.rodata_base = TG3_FW_RODATA_ADDR;
6339         info.rodata_len = TG3_FW_RODATA_LEN;
6340         info.rodata_data = &tg3FwRodata[0];
6341         info.data_base = TG3_FW_DATA_ADDR;
6342         info.data_len = TG3_FW_DATA_LEN;
6343         info.data_data = NULL;
6344
6345         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6346                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6347                                     &info);
6348         if (err)
6349                 return err;
6350
6351         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6352                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6353                                     &info);
6354         if (err)
6355                 return err;
6356
6357         /* Now startup only the RX cpu. */
6358         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6359         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6360
6361         for (i = 0; i < 5; i++) {
6362                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6363                         break;
6364                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6365                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6366                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6367                 udelay(1000);
6368         }
6369         if (i >= 5) {
6370                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6371                        "to set RX CPU PC, is %08x should be %08x\n",
6372                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6373                        TG3_FW_TEXT_ADDR);
6374                 return -ENODEV;
6375         }
6376         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6377         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6378
6379         return 0;
6380 }
6381
6382
6383 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6384 #define TG3_TSO_FW_RELEASE_MINOR        0x6
6385 #define TG3_TSO_FW_RELEASE_FIX          0x0
6386 #define TG3_TSO_FW_START_ADDR           0x08000000
6387 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6388 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6389 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6390 #define TG3_TSO_FW_RODATA_LEN           0x60
6391 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6392 #define TG3_TSO_FW_DATA_LEN             0x30
6393 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6394 #define TG3_TSO_FW_SBSS_LEN             0x2c
6395 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6396 #define TG3_TSO_FW_BSS_LEN              0x894
6397
6398 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6399         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6400         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6401         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6402         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6403         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6404         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6405         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6406         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6407         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6408         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6409         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6410         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6411         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6412         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6413         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6414         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6415         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6416         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6417         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6418         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6419         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6420         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6421         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6422         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6423         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6424         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6425         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6426         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6427         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6428         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6429         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6430         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6431         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6432         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6433         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6434         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6435         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6436         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6437         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6438         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6439         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6440         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6441         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6442         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6443         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6444         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6445         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6446         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6447         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6448         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6449         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6450         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6451         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6452         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6453         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6454         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6455         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6456         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6457         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6458         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6459         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6460         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6461         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6462         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6463         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6464         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6465         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6466         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6467         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6468         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6469         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6470         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6471         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6472         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6473         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6474         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6475         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6476         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6477         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6478         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6479         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6480         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6481         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6482         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6483         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6484         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6485         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6486         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6487         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6488         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6489         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6490         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6491         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6492         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6493         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6494         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6495         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6496         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6497         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6498         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6499         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6500         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6501         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6502         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6503         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6504         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6505         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6506         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6507         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6508         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6509         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6510         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6511         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6512         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6513         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6514         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6515         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6516         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6517         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6518         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6519         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6520         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6521         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6522         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6523         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6524         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6525         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6526         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6527         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6528         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6529         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6530         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6531         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6532         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6533         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6534         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6535         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6536         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6537         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6538         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6539         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6540         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6541         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6542         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6543         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6544         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6545         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6546         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6547         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6548         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6549         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6550         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6551         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6552         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6553         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6554         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6555         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6556         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6557         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6558         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6559         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6560         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6561         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6562         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6563         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6564         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6565         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6566         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6567         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6568         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6569         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6570         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6571         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6572         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6573         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6574         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6575         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6576         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6577         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6578         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6579         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6580         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6581         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6582         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6583         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6584         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6585         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6586         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6587         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6588         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6589         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6590         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6591         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6592         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6593         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6594         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6595         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6596         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6597         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6598         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6599         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6600         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6601         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6602         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6603         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6604         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6605         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6606         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6607         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6608         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6609         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6610         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6611         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6612         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6613         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6614         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6615         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6616         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6617         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6618         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6619         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6620         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6621         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6622         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6623         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6624         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6625         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6626         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6627         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6628         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6629         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6630         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6631         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6632         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6633         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6634         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6635         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6636         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6637         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6638         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6639         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6640         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6641         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6642         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6643         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6644         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6645         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6646         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6647         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6648         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6649         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6650         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6651         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6652         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6653         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6654         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6655         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6656         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6657         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6658         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6659         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6660         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6661         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6662         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6663         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6664         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6665         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6666         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6667         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6668         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6669         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6670         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6671         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6672         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6673         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6674         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6675         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6676         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6677         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6678         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6679         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6680         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6681         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6682         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6683 };
6684
6685 static const u32 tg3TsoFwRodata[] = {
6686         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6687         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6688         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6689         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6690         0x00000000,
6691 };
6692
6693 static const u32 tg3TsoFwData[] = {
6694         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6695         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6696         0x00000000,
6697 };
6698
6699 /* 5705 needs a special version of the TSO firmware.  */
6700 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6701 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
6702 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6703 #define TG3_TSO5_FW_START_ADDR          0x00010000
6704 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6705 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6706 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6707 #define TG3_TSO5_FW_RODATA_LEN          0x50
6708 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6709 #define TG3_TSO5_FW_DATA_LEN            0x20
6710 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6711 #define TG3_TSO5_FW_SBSS_LEN            0x28
6712 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6713 #define TG3_TSO5_FW_BSS_LEN             0x88
6714
6715 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6716         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6717         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6718         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6719         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6720         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6721         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6722         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6723         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6724         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6725         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6726         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6727         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6728         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6729         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6730         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6731         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6732         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6733         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6734         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6735         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6736         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6737         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6738         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6739         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6740         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6741         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6742         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6743         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6744         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6745         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6746         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6747         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6748         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6749         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6750         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6751         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6752         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6753         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6754         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6755         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6756         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6757         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6758         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6759         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6760         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6761         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6762         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6763         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6764         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6765         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6766         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6767         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6768         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6769         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6770         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6771         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6772         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6773         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6774         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6775         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6776         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6777         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6778         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6779         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6780         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6781         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6782         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6783         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6784         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6785         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6786         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6787         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6788         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6789         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6790         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6791         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6792         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6793         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6794         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6795         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6796         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6797         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6798         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6799         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6800         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6801         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6802         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6803         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6804         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6805         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6806         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6807         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6808         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6809         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6810         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6811         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6812         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6813         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6814         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6815         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6816         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6817         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6818         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6819         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6820         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6821         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6822         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6823         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6824         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6825         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6826         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6827         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6828         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6829         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6830         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6831         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6832         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6833         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6834         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6835         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6836         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6837         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6838         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6839         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6840         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6841         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6842         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6843         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6844         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6845         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6846         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6847         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6848         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6849         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6850         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6851         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6852         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6853         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6854         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6855         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6856         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6857         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6858         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6859         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6860         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6861         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6862         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6863         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6864         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6865         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6866         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6867         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6868         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6869         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6870         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6871         0x00000000, 0x00000000, 0x00000000,
6872 };
6873
6874 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6875         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6876         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6877         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6878         0x00000000, 0x00000000, 0x00000000,
6879 };
6880
6881 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6882         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6883         0x00000000, 0x00000000, 0x00000000,
6884 };
6885
6886 /* tp->lock is held. */
6887 static int tg3_load_tso_firmware(struct tg3 *tp)
6888 {
6889         struct fw_info info;
6890         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6891         int err, i;
6892
6893         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6894                 return 0;
6895
6896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6897                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6898                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6899                 info.text_data = &tg3Tso5FwText[0];
6900                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6901                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6902                 info.rodata_data = &tg3Tso5FwRodata[0];
6903                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6904                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6905                 info.data_data = &tg3Tso5FwData[0];
6906                 cpu_base = RX_CPU_BASE;
6907                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6908                 cpu_scratch_size = (info.text_len +
6909                                     info.rodata_len +
6910                                     info.data_len +
6911                                     TG3_TSO5_FW_SBSS_LEN +
6912                                     TG3_TSO5_FW_BSS_LEN);
6913         } else {
6914                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6915                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6916                 info.text_data = &tg3TsoFwText[0];
6917                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6918                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6919                 info.rodata_data = &tg3TsoFwRodata[0];
6920                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6921                 info.data_len = TG3_TSO_FW_DATA_LEN;
6922                 info.data_data = &tg3TsoFwData[0];
6923                 cpu_base = TX_CPU_BASE;
6924                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6925                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6926         }
6927
6928         err = tg3_load_firmware_cpu(tp, cpu_base,
6929                                     cpu_scratch_base, cpu_scratch_size,
6930                                     &info);
6931         if (err)
6932                 return err;
6933
6934         /* Now start up the CPU. */
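        /* Start-up handshake: point the CPU's PC at the firmware entry
         * point and verify that the write took.  If the PC does not read
         * back correctly, halt the CPU, rewrite the PC and retry up to
         * five times, 1 msec apart.  Once the PC sticks, clearing
         * CPU_MODE at the end of the function releases the CPU to run.
         */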
6935         tw32(cpu_base + CPU_STATE, 0xffffffff);
6936         tw32_f(cpu_base + CPU_PC,    info.text_base);
6937
6938         for (i = 0; i < 5; i++) {
6939                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6940                         break;
6941                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6942                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6943                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6944                 udelay(1000);
6945         }
6946         if (i >= 5) {
6947                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set "
6948                        "CPU PC for %s: is %08x, should be %08x\n",
6949                        tp->dev->name, tr32(cpu_base + CPU_PC),
6950                        info.text_base);
6951                 return -ENODEV;
6952         }
6953         tw32(cpu_base + CPU_STATE, 0xffffffff);
6954         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6955         return 0;
6956 }
6957
6958
6959 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6960 {
6961         struct tg3 *tp = netdev_priv(dev);
6962         struct sockaddr *addr = p;
6963         int err = 0, skip_mac_1 = 0;
6964
6965         if (!is_valid_ether_addr(addr->sa_data))
6966                 return -EINVAL;
6967
6968         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6969
6970         if (!netif_running(dev))
6971                 return 0;
6972
6973         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6974                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6975
6976                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6977                 addr0_low = tr32(MAC_ADDR_0_LOW);
6978                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6979                 addr1_low = tr32(MAC_ADDR_1_LOW);
6980
6981                 /* Skip MAC addr 1 if ASF is using it. */
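                /* If the two address slots already differ and slot 1 is
                 * non-zero, the ASF firmware has presumably claimed MAC
                 * address 1 for its own management traffic, so leave that
                 * slot untouched when reprogramming the address below.
                 */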
6982                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6983                     !(addr1_high == 0 && addr1_low == 0))
6984                         skip_mac_1 = 1;
6985         }
6986         spin_lock_bh(&tp->lock);
6987         __tg3_set_mac_addr(tp, skip_mac_1);
6988         spin_unlock_bh(&tp->lock);
6989
6990         return err;
6991 }
6992
6993 /* tp->lock is held. */
6994 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6995                            dma_addr_t mapping, u32 maxlen_flags,
6996                            u32 nic_addr)
6997 {
6998         tg3_write_mem(tp,
6999                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7000                       ((u64) mapping >> 32));
7001         tg3_write_mem(tp,
7002                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7003                       ((u64) mapping & 0xffffffff));
7004         tg3_write_mem(tp,
7005                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7006                        maxlen_flags);
7007
7008         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7009                 tg3_write_mem(tp,
7010                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7011                               nic_addr);
7012 }
7013
7014 static void __tg3_set_rx_mode(struct net_device *);
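/* Program the host coalescing engine from the ethtool_coalesce
 * parameters.  The per-IRQ tick registers and the statistics block
 * tick register are only written on pre-5705_PLUS chips, and the
 * statistics ticks are forced to zero while the link is down.
 */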
7015 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7016 {
7017         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7018         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7019         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7020         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7021         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7022                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7023                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7024         }
7025         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7026         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7027         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7028                 u32 val = ec->stats_block_coalesce_usecs;
7029
7030                 if (!netif_carrier_ok(tp->dev))
7031                         val = 0;
7032
7033                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7034         }
7035 }
7036
7037 /* tp->lock is held. */
7038 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7039 {
7040         u32 val, rdmac_mode;
7041         int i, err, limit;
7042
7043         tg3_disable_ints(tp);
7044
7045         tg3_stop_fw(tp);
7046
7047         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7048
7049         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7050                 tg3_abort_hw(tp, 1);
7051         }
7052
7053         if (reset_phy &&
7054             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7055                 tg3_phy_reset(tp);
7056
7057         err = tg3_chip_reset(tp);
7058         if (err)
7059                 return err;
7060
7061         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7062
7063         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7064                 val = tr32(TG3_CPMU_CTRL);
7065                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7066                 tw32(TG3_CPMU_CTRL, val);
7067
7068                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7069                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7070                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7071                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7072
7073                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7074                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7075                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7076                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7077
7078                 val = tr32(TG3_CPMU_HST_ACC);
7079                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7080                 val |= CPMU_HST_ACC_MACCLK_6_25;
7081                 tw32(TG3_CPMU_HST_ACC, val);
7082         }
7083
7084         /* This works around an issue with Athlon chipsets on
7085          * B3 tigon3 silicon.  This bit has no effect on any
7086          * other revision.  But do not set this on PCI Express
7087          * chips and don't even touch the clocks if the CPMU is present.
7088          */
7089         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7090                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7091                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7092                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7093         }
7094
7095         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7096             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7097                 val = tr32(TG3PCI_PCISTATE);
7098                 val |= PCISTATE_RETRY_SAME_DMA;
7099                 tw32(TG3PCI_PCISTATE, val);
7100         }
7101
7102         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7103                 /* Allow reads and writes to the
7104                  * APE register and memory space.
7105                  */
7106                 val = tr32(TG3PCI_PCISTATE);
7107                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7108                        PCISTATE_ALLOW_APE_SHMEM_WR;
7109                 tw32(TG3PCI_PCISTATE, val);
7110         }
7111
7112         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7113                 /* Enable some hw fixes.  */
7114                 val = tr32(TG3PCI_MSI_DATA);
7115                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7116                 tw32(TG3PCI_MSI_DATA, val);
7117         }
7118
7119         /* Descriptor ring init may make accesses to the
7120          * NIC SRAM area to set up the TX descriptors, so we
7121          * can only do this after the hardware has been
7122          * successfully reset.
7123          */
7124         err = tg3_init_rings(tp);
7125         if (err)
7126                 return err;
7127
7128         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7129             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7130             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7131                 /* This value is determined during the probe time DMA
7132                  * engine test, tg3_test_dma.
7133                  */
7134                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7135         }
7136
7137         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7138                           GRC_MODE_4X_NIC_SEND_RINGS |
7139                           GRC_MODE_NO_TX_PHDR_CSUM |
7140                           GRC_MODE_NO_RX_PHDR_CSUM);
7141         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7142
7143         /* Pseudo-header checksum is done by hardware logic and not
7144          * the offload processors, so make the chip do the pseudo-
7145          * header checksums on receive.  For transmit it is more
7146          * convenient to do the pseudo-header checksum in software
7147          * as Linux does that on transmit for us in all cases.
7148          */
7149         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7150
7151         tw32(GRC_MODE,
7152              tp->grc_mode |
7153              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7154
7155         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
7156         val = tr32(GRC_MISC_CFG);
7157         val &= ~0xff;
7158         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7159         tw32(GRC_MISC_CFG, val);
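        /* With the fixed 66 MHz clock, a prescaler value of 65 presumably
         * divides by (65 + 1) so that the chip's timer ticks at 1 usec
         * intervals.
         */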
7160
7161         /* Initialize MBUF/DESC pool. */
7162         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7163                 /* Do nothing.  */
7164         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7165                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7167                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7168                 else
7169                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7170                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7171                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7172         } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7174                 int fw_len;
7175
7176                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7177                           TG3_TSO5_FW_RODATA_LEN +
7178                           TG3_TSO5_FW_DATA_LEN +
7179                           TG3_TSO5_FW_SBSS_LEN +
7180                           TG3_TSO5_FW_BSS_LEN);
7181                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
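                /* Round fw_len up to the next 128-byte boundary.  The 5705
                 * TSO firmware is loaded at the start of the MBUF pool SRAM
                 * (see tg3_load_tso_firmware), so that much is carved out
                 * of the pool here.
                 */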
7182                 tw32(BUFMGR_MB_POOL_ADDR,
7183                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7184                 tw32(BUFMGR_MB_POOL_SIZE,
7185                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7186         }
7187
7188         if (tp->dev->mtu <= ETH_DATA_LEN) {
7189                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7190                      tp->bufmgr_config.mbuf_read_dma_low_water);
7191                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7192                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7193                 tw32(BUFMGR_MB_HIGH_WATER,
7194                      tp->bufmgr_config.mbuf_high_water);
7195         } else {
7196                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7197                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7198                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7199                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7200                 tw32(BUFMGR_MB_HIGH_WATER,
7201                      tp->bufmgr_config.mbuf_high_water_jumbo);
7202         }
7203         tw32(BUFMGR_DMA_LOW_WATER,
7204              tp->bufmgr_config.dma_low_water);
7205         tw32(BUFMGR_DMA_HIGH_WATER,
7206              tp->bufmgr_config.dma_high_water);
7207
7208         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7209         for (i = 0; i < 2000; i++) {
7210                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7211                         break;
7212                 udelay(10);
7213         }
7214         if (i >= 2000) {
7215                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7216                        tp->dev->name);
7217                 return -ENODEV;
7218         }
7219
7220         /* Setup replenish threshold. */
7221         val = tp->rx_pending / 8;
7222         if (val == 0)
7223                 val = 1;
7224         else if (val > tp->rx_std_max_post)
7225                 val = tp->rx_std_max_post;
7226         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7227                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7228                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7229
7230                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7231                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7232         }
7233
7234         tw32(RCVBDI_STD_THRESH, val);
7235
7236         /* Initialize TG3_BDINFO's at:
7237          *  RCVDBDI_STD_BD:     standard eth size rx ring
7238          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7239          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7240          *
7241          * like so:
7242          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7243          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7244          *                              ring attribute flags
7245          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7246          *
7247          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7248          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7249          *
7250          * The size of each ring is fixed in the firmware, but the location is
7251          * configurable.
7252          */
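        /* tg3_set_bdinfo() above performs this same set of writes and is
         * used further down for the send and receive return rings; the
         * standard and jumbo receive rings are written out longhand here.
         */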
7253         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7254              ((u64) tp->rx_std_mapping >> 32));
7255         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7256              ((u64) tp->rx_std_mapping & 0xffffffff));
7257         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7258              NIC_SRAM_RX_BUFFER_DESC);
7259
7260         /* Don't even try to program the JUMBO/MINI buffer descriptor
7261          * configs on 5705.
7262          */
7263         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7264                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7265                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7266         } else {
7267                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7268                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7269
7270                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7271                      BDINFO_FLAGS_DISABLED);
7272
7273                 /* Setup replenish threshold. */
7274                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7275
7276                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7277                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7278                              ((u64) tp->rx_jumbo_mapping >> 32));
7279                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7280                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7281                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7282                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7283                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7284                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7285                 } else {
7286                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7287                              BDINFO_FLAGS_DISABLED);
7288                 }
7289
7290         }
7291
7292         /* There is only one send ring on 5705/5750, no need to explicitly
7293          * disable the others.
7294          */
7295         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7296                 /* Clear out send RCB ring in SRAM. */
7297                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7298                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7299                                       BDINFO_FLAGS_DISABLED);
7300         }
7301
7302         tp->tx_prod = 0;
7303         tp->tx_cons = 0;
7304         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7305         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7306
7307         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7308                        tp->tx_desc_mapping,
7309                        (TG3_TX_RING_SIZE <<
7310                         BDINFO_FLAGS_MAXLEN_SHIFT),
7311                        NIC_SRAM_TX_BUFFER_DESC);
7312
7313         /* There is only one receive return ring on 5705/5750, no need
7314          * to explicitly disable the others.
7315          */
7316         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7317                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7318                      i += TG3_BDINFO_SIZE) {
7319                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7320                                       BDINFO_FLAGS_DISABLED);
7321                 }
7322         }
7323
7324         tp->rx_rcb_ptr = 0;
7325         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7326
7327         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7328                        tp->rx_rcb_mapping,
7329                        (TG3_RX_RCB_RING_SIZE(tp) <<
7330                         BDINFO_FLAGS_MAXLEN_SHIFT),
7331                        0);
7332
7333         tp->rx_std_ptr = tp->rx_pending;
7334         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7335                      tp->rx_std_ptr);
7336
7337         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7338                                                 tp->rx_jumbo_pending : 0;
7339         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7340                      tp->rx_jumbo_ptr);
7341
7342         /* Initialize MAC address and backoff seed. */
7343         __tg3_set_mac_addr(tp, 0);
7344
7345         /* MTU + ethernet header + FCS + optional VLAN tag */
7346         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
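        /* ETH_HLEN covers the Ethernet header; the extra 8 bytes allow for
         * the 4-byte FCS plus a 4-byte VLAN tag.
         */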
7347
7348         /* The slot time is changed by tg3_setup_phy if we
7349          * run at gigabit with half duplex.
7350          */
7351         tw32(MAC_TX_LENGTHS,
7352              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7353              (6 << TX_LENGTHS_IPG_SHIFT) |
7354              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7355
7356         /* Receive rules. */
7357         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7358         tw32(RCVLPC_CONFIG, 0x0181);
7359
7360         /* Calculate RDMAC_MODE setting early, we need it to determine
7361          * the RCVLPC_STATE_ENABLE mask.
7362          */
7363         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7364                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7365                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7366                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7367                       RDMAC_MODE_LNGREAD_ENAB);
7368
7369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7371                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7372                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7373                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7374
7375         /* If statement applies to 5705 and 5750 PCI devices only */
7376         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7377              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7378             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7379                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7380                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7381                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7382                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7383                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7384                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7385                 }
7386         }
7387
7388         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7389                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7390
7391         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7392                 rdmac_mode |= (1 << 27);
7393
7394         /* Receive/send statistics. */
7395         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7396                 val = tr32(RCVLPC_STATS_ENABLE);
7397                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7398                 tw32(RCVLPC_STATS_ENABLE, val);
7399         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7400                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7401                 val = tr32(RCVLPC_STATS_ENABLE);
7402                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7403                 tw32(RCVLPC_STATS_ENABLE, val);
7404         } else {
7405                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7406         }
7407         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7408         tw32(SNDDATAI_STATSENAB, 0xffffff);
7409         tw32(SNDDATAI_STATSCTRL,
7410              (SNDDATAI_SCTRL_ENABLE |
7411               SNDDATAI_SCTRL_FASTUPD));
7412
7413         /* Setup host coalescing engine. */
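        /* Disable the engine and poll for up to 2000 * 10 usec = 20 msec
         * for the ENABLE bit to clear before reprogramming the coalescing
         * parameters below.
         */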
7414         tw32(HOSTCC_MODE, 0);
7415         for (i = 0; i < 2000; i++) {
7416                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7417                         break;
7418                 udelay(10);
7419         }
7420
7421         __tg3_set_coalesce(tp, &tp->coal);
7422
7423         /* set status block DMA address */
7424         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7425              ((u64) tp->status_mapping >> 32));
7426         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7427              ((u64) tp->status_mapping & 0xffffffff));
7428
7429         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7430                 /* Status/statistics block address.  See tg3_timer,
7431                  * the tg3_periodic_fetch_stats call there, and
7432                  * tg3_get_stats to see how this works for 5705/5750 chips.
7433                  */
7434                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7435                      ((u64) tp->stats_mapping >> 32));
7436                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7437                      ((u64) tp->stats_mapping & 0xffffffff));
7438                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7439                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7440         }
7441
7442         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7443
7444         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7445         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7446         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7447                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7448
7449         /* Clear statistics/status block in chip, and status block in ram. */
7450         for (i = NIC_SRAM_STATS_BLK;
7451              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7452              i += sizeof(u32)) {
7453                 tg3_write_mem(tp, i, 0);
7454                 udelay(40);
7455         }
7456         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7457
7458         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7459                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7460                 /* Reset to prevent intermittently losing the 1st rx packet */
7461                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7462                 udelay(10);
7463         }
7464
7465         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7466                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7467         else
7468                 tp->mac_mode = 0;
7469         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7470                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7471         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7472             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7473             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7474                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7475         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7476         udelay(40);
7477
7478         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7479          * If TG3_FLG2_IS_NIC is zero, we should read the
7480          * register to preserve the GPIO settings for LOMs. The GPIOs,
7481          * whether used as inputs or outputs, are set by boot code after
7482          * reset.
7483          */
7484         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7485                 u32 gpio_mask;
7486
7487                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7488                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7489                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7490
7491                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7492                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7493                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7494
7495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7496                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7497
7498                 tp->grc_local_ctrl &= ~gpio_mask;
7499                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7500
7501                 /* GPIO1 must be driven high for eeprom write protect */
7502                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7503                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7504                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7505         }
7506         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7507         udelay(100);
7508
7509         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7510         tp->last_tag = 0;
7511
7512         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7513                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7514                 udelay(40);
7515         }
7516
7517         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7518                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7519                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7520                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7521                WDMAC_MODE_LNGREAD_ENAB);
7522
7523         /* If statement applies to 5705 and 5750 PCI devices only */
7524         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7525              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7527                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7528                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7529                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7530                         /* nothing */
7531                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7532                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7533                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7534                         val |= WDMAC_MODE_RX_ACCEL;
7535                 }
7536         }
7537
7538         /* Enable host coalescing bug fix */
7539         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7540             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7541             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7542             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7543             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7544                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7545
7546         tw32_f(WDMAC_MODE, val);
7547         udelay(40);
7548
7549         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7550                 u16 pcix_cmd;
7551
7552                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7553                                      &pcix_cmd);
7554                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7555                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7556                         pcix_cmd |= PCI_X_CMD_READ_2K;
7557                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7558                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7559                         pcix_cmd |= PCI_X_CMD_READ_2K;
7560                 }
7561                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7562                                       pcix_cmd);
7563         }
7564
7565         tw32_f(RDMAC_MODE, rdmac_mode);
7566         udelay(40);
7567
7568         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7569         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7570                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7571
7572         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7573                 tw32(SNDDATAC_MODE,
7574                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7575         else
7576                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7577
7578         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7579         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7580         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7581         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7582         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7583                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7584         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7585         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7586
7587         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7588                 err = tg3_load_5701_a0_firmware_fix(tp);
7589                 if (err)
7590                         return err;
7591         }
7592
7593         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7594                 err = tg3_load_tso_firmware(tp);
7595                 if (err)
7596                         return err;
7597         }
7598
7599         tp->tx_mode = TX_MODE_ENABLE;
7600         tw32_f(MAC_TX_MODE, tp->tx_mode);
7601         udelay(100);
7602
7603         tp->rx_mode = RX_MODE_ENABLE;
7604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7605             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7606             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7608                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7609
7610         tw32_f(MAC_RX_MODE, tp->rx_mode);
7611         udelay(10);
7612
7613         tw32(MAC_LED_CTRL, tp->led_ctrl);
7614
7615         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7616         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7617                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7618                 udelay(10);
7619         }
7620         tw32_f(MAC_RX_MODE, tp->rx_mode);
7621         udelay(10);
7622
7623         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7624                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7625                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7626                         /* Set drive transmission level to 1.2V  */
7627                         /* only if the signal pre-emphasis bit is not set  */
7628                         val = tr32(MAC_SERDES_CFG);
7629                         val &= 0xfffff000;
7630                         val |= 0x880;
7631                         tw32(MAC_SERDES_CFG, val);
7632                 }
7633                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7634                         tw32(MAC_SERDES_CFG, 0x616000);
7635         }
7636
7637         /* Prevent chip from dropping frames when flow control
7638          * is enabled.
7639          */
7640         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7641
7642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7643             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7644                 /* Use hardware link auto-negotiation */
7645                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7646         }
7647
7648         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7649             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7650                 u32 tmp;
7651
7652                 tmp = tr32(SERDES_RX_CTRL);
7653                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7654                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7655                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7656                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7657         }
7658
7659         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7660                 if (tp->link_config.phy_is_low_power) {
7661                         tp->link_config.phy_is_low_power = 0;
7662                         tp->link_config.speed = tp->link_config.orig_speed;
7663                         tp->link_config.duplex = tp->link_config.orig_duplex;
7664                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7665                 }
7666
7667                 err = tg3_setup_phy(tp, 0);
7668                 if (err)
7669                         return err;
7670
7671                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7672                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7673                         u32 tmp;
7674
7675                         /* Clear CRC stats. */
7676                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7677                                 tg3_writephy(tp, MII_TG3_TEST1,
7678                                              tmp | MII_TG3_TEST1_CRC_EN);
7679                                 tg3_readphy(tp, 0x14, &tmp);
7680                         }
7681                 }
7682         }
7683
7684         __tg3_set_rx_mode(tp->dev);
7685
7686         /* Initialize receive rules. */
7687         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7688         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7689         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7690         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7691
7692         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7693             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7694                 limit = 8;
7695         else
7696                 limit = 16;
7697         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7698                 limit -= 4;
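        /* Deliberate fall-through: starting at the first unused rule,
         * clear every remaining rule/value register pair.  Rules 0 and 1
         * were programmed above, rules 2 and 3 are intentionally left
         * untouched (note the commented-out writes), and four extra
         * rules are skipped when ASF is enabled (limit -= 4 above).
         */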
7699         switch (limit) {
7700         case 16:
7701                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7702         case 15:
7703                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7704         case 14:
7705                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7706         case 13:
7707                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7708         case 12:
7709                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7710         case 11:
7711                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7712         case 10:
7713                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7714         case 9:
7715                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7716         case 8:
7717                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7718         case 7:
7719                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7720         case 6:
7721                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7722         case 5:
7723                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7724         case 4:
7725                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7726         case 3:
7727                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7728         case 2:
7729         case 1:
7730
7731         default:
7732                 break;
7733         }
7734
7735         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7736                 /* Write our heartbeat update interval to APE. */
7737                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7738                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7739
7740         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7741
7742         return 0;
7743 }
7744
7745 /* Called at device open time to get the chip ready for
7746  * packet processing.  Invoked with tp->lock held.
7747  */
7748 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7749 {
7750         tg3_switch_clocks(tp);
7751
7752         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7753
7754         return tg3_reset_hw(tp, reset_phy);
7755 }
7756
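/* Add the current value of REG to the 64-bit (high/low) counter PSTAT,
 * propagating a carry into the high word when the low word overflows.
 */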
7757 #define TG3_STAT_ADD32(PSTAT, REG) \
7758 do {    u32 __val = tr32(REG); \
7759         (PSTAT)->low += __val; \
7760         if ((PSTAT)->low < __val) \
7761                 (PSTAT)->high += 1; \
7762 } while (0)
7763
7764 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7765 {
7766         struct tg3_hw_stats *sp = tp->hw_stats;
7767
7768         if (!netif_carrier_ok(tp->dev))
7769                 return;
7770
7771         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7772         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7773         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7774         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7775         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7776         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7777         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7778         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7779         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7780         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7781         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7782         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7783         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7784
7785         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7786         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7787         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7788         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7789         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7790         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7791         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7792         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7793         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7794         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7795         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7796         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7797         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7798         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7799
7800         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7801         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7802         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7803 }
7804
7805 static void tg3_timer(unsigned long __opaque)
7806 {
7807         struct tg3 *tp = (struct tg3 *) __opaque;
7808
7809         if (tp->irq_sync)
7810                 goto restart_timer;
7811
7812         spin_lock(&tp->lock);
7813
7814         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7815                 /* All of this garbage is because, when using non-tagged
7816                  * IRQ status, the mailbox/status_block protocol the chip
7817                  * uses with the CPU is race prone.
7818                  */
7819                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7820                         tw32(GRC_LOCAL_CTRL,
7821                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7822                 } else {
7823                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7824                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7825                 }
7826
7827                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7828                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7829                         spin_unlock(&tp->lock);
7830                         schedule_work(&tp->reset_task);
7831                         return;
7832                 }
7833         }
7834
7835         /* This part only runs once per second. */
7836         if (!--tp->timer_counter) {
7837                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7838                         tg3_periodic_fetch_stats(tp);
7839
7840                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7841                         u32 mac_stat;
7842                         int phy_event;
7843
7844                         mac_stat = tr32(MAC_STATUS);
7845
7846                         phy_event = 0;
7847                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7848                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7849                                         phy_event = 1;
7850                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7851                                 phy_event = 1;
7852
7853                         if (phy_event)
7854                                 tg3_setup_phy(tp, 0);
7855                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7856                         u32 mac_stat = tr32(MAC_STATUS);
7857                         int need_setup = 0;
7858
7859                         if (netif_carrier_ok(tp->dev) &&
7860                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7861                                 need_setup = 1;
7862                         }
7863                         if (!netif_carrier_ok(tp->dev) &&
7864                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7865                                          MAC_STATUS_SIGNAL_DET))) {
7866                                 need_setup = 1;
7867                         }
7868                         if (need_setup) {
7869                                 if (!tp->serdes_counter) {
7870                                         tw32_f(MAC_MODE,
7871                                              (tp->mac_mode &
7872                                               ~MAC_MODE_PORT_MODE_MASK));
7873                                         udelay(40);
7874                                         tw32_f(MAC_MODE, tp->mac_mode);
7875                                         udelay(40);
7876                                 }
7877                                 tg3_setup_phy(tp, 0);
7878                         }
7879                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7880                         tg3_serdes_parallel_detect(tp);
7881
7882                 tp->timer_counter = tp->timer_multiplier;
7883         }
7884
7885         /* Heartbeat is only sent once every 2 seconds.
7886          *
7887          * The heartbeat is to tell the ASF firmware that the host
7888          * driver is still alive.  In the event that the OS crashes,
7889          * ASF needs to reset the hardware to free up the FIFO space
7890          * that may be filled with rx packets destined for the host.
7891          * If the FIFO is full, ASF will no longer function properly.
7892          *
7893          * Unintended resets have been reported on real-time kernels
7894          * where the timer doesn't run on time.  Netpoll will also have
7895          * the same problem.
7896          *
7897          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7898          * to check the ring condition when the heartbeat is expiring
7899          * before doing the reset.  This will prevent most unintended
7900          * resets.
7901          */
7902         if (!--tp->asf_counter) {
7903                 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7904                     !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7905                         tg3_wait_for_event_ack(tp);
7906
7907                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7908                                       FWCMD_NICDRV_ALIVE3);
7909                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7910                         /* 5 seconds timeout */
7911                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7912                         /* 5 second timeout */
7913                         tg3_generate_fw_event(tp);
7914                 }
7915                 tp->asf_counter = tp->asf_multiplier;
7916         }
7917
7918         spin_unlock(&tp->lock);
7919
7920 restart_timer:
7921         tp->timer.expires = jiffies + tp->timer_offset;
7922         add_timer(&tp->timer);
7923 }
7924
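/* Install the interrupt handler that matches the current interrupt mode:
 * the MSI handler (or its one-shot variant) when MSI is in use, otherwise
 * the INTx handler (or its tagged-status variant) requested as a shared
 * interrupt.
 */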
7925 static int tg3_request_irq(struct tg3 *tp)
7926 {
7927         irq_handler_t fn;
7928         unsigned long flags;
7929         struct net_device *dev = tp->dev;
7930
7931         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7932                 fn = tg3_msi;
7933                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7934                         fn = tg3_msi_1shot;
7935                 flags = IRQF_SAMPLE_RANDOM;
7936         } else {
7937                 fn = tg3_interrupt;
7938                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7939                         fn = tg3_interrupt_tagged;
7940                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7941         }
7942         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7943 }
7944
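/* Check that the chip can actually deliver an interrupt: temporarily
 * swap in tg3_test_isr, force a host-coalescing "now" event, and poll
 * for up to ~50ms for evidence that the handler ran (a non-zero
 * interrupt mailbox or PCI interrupts left masked) before restoring
 * the normal handler.
 */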
7945 static int tg3_test_interrupt(struct tg3 *tp)
7946 {
7947         struct net_device *dev = tp->dev;
7948         int err, i, intr_ok = 0;
7949
7950         if (!netif_running(dev))
7951                 return -ENODEV;
7952
7953         tg3_disable_ints(tp);
7954
7955         free_irq(tp->pdev->irq, dev);
7956
7957         err = request_irq(tp->pdev->irq, tg3_test_isr,
7958                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7959         if (err)
7960                 return err;
7961
7962         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7963         tg3_enable_ints(tp);
7964
7965         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7966                HOSTCC_MODE_NOW);
7967
7968         for (i = 0; i < 5; i++) {
7969                 u32 int_mbox, misc_host_ctrl;
7970
7971                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7972                                         TG3_64BIT_REG_LOW);
7973                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7974
7975                 if ((int_mbox != 0) ||
7976                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7977                         intr_ok = 1;
7978                         break;
7979                 }
7980
7981                 msleep(10);
7982         }
7983
7984         tg3_disable_ints(tp);
7985
7986         free_irq(tp->pdev->irq, dev);
7987
7988         err = tg3_request_irq(tp);
7989
7990         if (err)
7991                 return err;
7992
7993         if (intr_ok)
7994                 return 0;
7995
7996         return -EIO;
7997 }
7998
7999 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8000  * INTx mode is successfully restored.
8001  */
8002 static int tg3_test_msi(struct tg3 *tp)
8003 {
8004         struct net_device *dev = tp->dev;
8005         int err;
8006         u16 pci_cmd;
8007
8008         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8009                 return 0;
8010
8011         /* Turn off SERR reporting in case MSI terminates with Master
8012          * Abort.
8013          */
8014         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8015         pci_write_config_word(tp->pdev, PCI_COMMAND,
8016                               pci_cmd & ~PCI_COMMAND_SERR);
8017
8018         err = tg3_test_interrupt(tp);
8019
8020         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8021
8022         if (!err)
8023                 return 0;
8024
8025         /* other failures */
8026         if (err != -EIO)
8027                 return err;
8028
8029         /* MSI test failed, go back to INTx mode */
8030         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8031                "switching to INTx mode. Please report this failure to "
8032                "the PCI maintainer and include system chipset information.\n",
8033                        tp->dev->name);
8034
8035         free_irq(tp->pdev->irq, dev);
8036         pci_disable_msi(tp->pdev);
8037
8038         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8039
8040         err = tg3_request_irq(tp);
8041         if (err)
8042                 return err;
8043
8044         /* Need to reset the chip because the MSI cycle may have terminated
8045          * with Master Abort.
8046          */
8047         tg3_full_lock(tp, 1);
8048
8049         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8050         err = tg3_init_hw(tp, 1);
8051
8052         tg3_full_unlock(tp);
8053
8054         if (err)
8055                 free_irq(tp->pdev->irq, dev);
8056
8057         return err;
8058 }
8059
8060 static int tg3_open(struct net_device *dev)
8061 {
8062         struct tg3 *tp = netdev_priv(dev);
8063         int err;
8064
8065         netif_carrier_off(tp->dev);
8066
8067         err = tg3_set_power_state(tp, PCI_D0);
8068         if (err)
8069                 return err;
8070
8071         tg3_full_lock(tp, 0);
8072
8073         tg3_disable_ints(tp);
8074         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8075
8076         tg3_full_unlock(tp);
8077
8078         /* The placement of this call is tied
8079          * to the setup and use of Host TX descriptors.
8080          */
8081         err = tg3_alloc_consistent(tp);
8082         if (err)
8083                 return err;
8084
8085         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8086                 /* All MSI supporting chips should support tagged
8087                  * status.  Assert that this is the case.
8088                  */
8089                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8090                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8091                                "Not using MSI.\n", tp->dev->name);
8092                 } else if (pci_enable_msi(tp->pdev) == 0) {
8093                         u32 msi_mode;
8094
8095                         msi_mode = tr32(MSGINT_MODE);
8096                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8097                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8098                 }
8099         }
8100         err = tg3_request_irq(tp);
8101
8102         if (err) {
8103                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8104                         pci_disable_msi(tp->pdev);
8105                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8106                 }
8107                 tg3_free_consistent(tp);
8108                 return err;
8109         }
8110
8111         napi_enable(&tp->napi);
8112
8113         tg3_full_lock(tp, 0);
8114
8115         err = tg3_init_hw(tp, 1);
8116         if (err) {
8117                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8118                 tg3_free_rings(tp);
8119         } else {
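                /* timer_offset sets the tick period: 1 second with tagged
                 * status, 100ms without.  The multipliers convert ticks
                 * back into the once-per-second work and the two-second
                 * ASF heartbeat in tg3_timer().
                 */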
8120                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8121                         tp->timer_offset = HZ;
8122                 else
8123                         tp->timer_offset = HZ / 10;
8124
8125                 BUG_ON(tp->timer_offset > HZ);
8126                 tp->timer_counter = tp->timer_multiplier =
8127                         (HZ / tp->timer_offset);
8128                 tp->asf_counter = tp->asf_multiplier =
8129                         ((HZ / tp->timer_offset) * 2);
8130
8131                 init_timer(&tp->timer);
8132                 tp->timer.expires = jiffies + tp->timer_offset;
8133                 tp->timer.data = (unsigned long) tp;
8134                 tp->timer.function = tg3_timer;
8135         }
8136
8137         tg3_full_unlock(tp);
8138
8139         if (err) {
8140                 napi_disable(&tp->napi);
8141                 free_irq(tp->pdev->irq, dev);
8142                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8143                         pci_disable_msi(tp->pdev);
8144                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8145                 }
8146                 tg3_free_consistent(tp);
8147                 return err;
8148         }
8149
8150         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8151                 err = tg3_test_msi(tp);
8152
8153                 if (err) {
8154                         tg3_full_lock(tp, 0);
8155
8156                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8157                                 pci_disable_msi(tp->pdev);
8158                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8159                         }
8160                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8161                         tg3_free_rings(tp);
8162                         tg3_free_consistent(tp);
8163
8164                         tg3_full_unlock(tp);
8165
8166                         napi_disable(&tp->napi);
8167
8168                         return err;
8169                 }
8170
8171                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8172                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8173                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
8174
8175                                 tw32(PCIE_TRANSACTION_CFG,
8176                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
8177                         }
8178                 }
8179         }
8180
8181         tg3_phy_start(tp);
8182
8183         tg3_full_lock(tp, 0);
8184
8185         add_timer(&tp->timer);
8186         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8187         tg3_enable_ints(tp);
8188
8189         tg3_full_unlock(tp);
8190
8191         netif_start_queue(dev);
8192
8193         return 0;
8194 }
8195
8196 #if 0
8197 /*static*/ void tg3_dump_state(struct tg3 *tp)
8198 {
8199         u32 val32, val32_2, val32_3, val32_4, val32_5;
8200         u16 val16;
8201         int i;
8202
8203         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8204         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8205         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8206                val16, val32);
8207
8208         /* MAC block */
8209         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8210                tr32(MAC_MODE), tr32(MAC_STATUS));
8211         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8212                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8213         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8214                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8215         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8216                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8217
8218         /* Send data initiator control block */
8219         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8220                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8221         printk("       SNDDATAI_STATSCTRL[%08x]\n",
8222                tr32(SNDDATAI_STATSCTRL));
8223
8224         /* Send data completion control block */
8225         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8226
8227         /* Send BD ring selector block */
8228         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8229                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8230
8231         /* Send BD initiator control block */
8232         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8233                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8234
8235         /* Send BD completion control block */
8236         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8237
8238         /* Receive list placement control block */
8239         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8240                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8241         printk("       RCVLPC_STATSCTRL[%08x]\n",
8242                tr32(RCVLPC_STATSCTRL));
8243
8244         /* Receive data and receive BD initiator control block */
8245         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8246                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8247
8248         /* Receive data completion control block */
8249         printk("DEBUG: RCVDCC_MODE[%08x]\n",
8250                tr32(RCVDCC_MODE));
8251
8252         /* Receive BD initiator control block */
8253         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8254                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8255
8256         /* Receive BD completion control block */
8257         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8258                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8259
8260         /* Receive list selector control block */
8261         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8262                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8263
8264         /* Mbuf cluster free block */
8265         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8266                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8267
8268         /* Host coalescing control block */
8269         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8270                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8271         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8272                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8273                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8274         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8275                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8276                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8277         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8278                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8279         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8280                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8281
8282         /* Memory arbiter control block */
8283         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8284                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8285
8286         /* Buffer manager control block */
8287         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8288                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8289         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8290                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8291         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8292                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8293                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8294                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8295
8296         /* Read DMA control block */
8297         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8298                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8299
8300         /* Write DMA control block */
8301         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8302                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8303
8304         /* DMA completion block */
8305         printk("DEBUG: DMAC_MODE[%08x]\n",
8306                tr32(DMAC_MODE));
8307
8308         /* GRC block */
8309         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8310                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8311         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8312                tr32(GRC_LOCAL_CTRL));
8313
8314         /* TG3_BDINFOs */
8315         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8316                tr32(RCVDBDI_JUMBO_BD + 0x0),
8317                tr32(RCVDBDI_JUMBO_BD + 0x4),
8318                tr32(RCVDBDI_JUMBO_BD + 0x8),
8319                tr32(RCVDBDI_JUMBO_BD + 0xc));
8320         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8321                tr32(RCVDBDI_STD_BD + 0x0),
8322                tr32(RCVDBDI_STD_BD + 0x4),
8323                tr32(RCVDBDI_STD_BD + 0x8),
8324                tr32(RCVDBDI_STD_BD + 0xc));
8325         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8326                tr32(RCVDBDI_MINI_BD + 0x0),
8327                tr32(RCVDBDI_MINI_BD + 0x4),
8328                tr32(RCVDBDI_MINI_BD + 0x8),
8329                tr32(RCVDBDI_MINI_BD + 0xc));
8330
8331         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8332         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8333         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8334         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8335         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8336                val32, val32_2, val32_3, val32_4);
8337
8338         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8339         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8340         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8341         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8342         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8343                val32, val32_2, val32_3, val32_4);
8344
8345         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8346         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8347         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8348         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8349         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8350         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8351                val32, val32_2, val32_3, val32_4, val32_5);
8352
8353         /* SW status block */
8354         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8355                tp->hw_status->status,
8356                tp->hw_status->status_tag,
8357                tp->hw_status->rx_jumbo_consumer,
8358                tp->hw_status->rx_consumer,
8359                tp->hw_status->rx_mini_consumer,
8360                tp->hw_status->idx[0].rx_producer,
8361                tp->hw_status->idx[0].tx_consumer);
8362
8363         /* SW statistics block */
8364         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8365                ((u32 *)tp->hw_stats)[0],
8366                ((u32 *)tp->hw_stats)[1],
8367                ((u32 *)tp->hw_stats)[2],
8368                ((u32 *)tp->hw_stats)[3]);
8369
8370         /* Mailboxes */
8371         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8372                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8373                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8374                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8375                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8376
8377         /* NIC side send descriptors. */
8378         for (i = 0; i < 6; i++) {
8379                 unsigned long txd;
8380
8381                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8382                         + (i * sizeof(struct tg3_tx_buffer_desc));
8383                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8384                        i,
8385                        readl(txd + 0x0), readl(txd + 0x4),
8386                        readl(txd + 0x8), readl(txd + 0xc));
8387         }
8388
8389         /* NIC side RX descriptors. */
8390         for (i = 0; i < 6; i++) {
8391                 unsigned long rxd;
8392
8393                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8394                         + (i * sizeof(struct tg3_rx_buffer_desc));
8395                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8396                        i,
8397                        readl(rxd + 0x0), readl(rxd + 0x4),
8398                        readl(rxd + 0x8), readl(rxd + 0xc));
8399                 rxd += (4 * sizeof(u32));
8400                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8401                        i,
8402                        readl(rxd + 0x0), readl(rxd + 0x4),
8403                        readl(rxd + 0x8), readl(rxd + 0xc));
8404         }
8405
8406         for (i = 0; i < 6; i++) {
8407                 unsigned long rxd;
8408
8409                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8410                         + (i * sizeof(struct tg3_rx_buffer_desc));
8411                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8412                        i,
8413                        readl(rxd + 0x0), readl(rxd + 0x4),
8414                        readl(rxd + 0x8), readl(rxd + 0xc));
8415                 rxd += (4 * sizeof(u32));
8416                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8417                        i,
8418                        readl(rxd + 0x0), readl(rxd + 0x4),
8419                        readl(rxd + 0x8), readl(rxd + 0xc));
8420         }
8421 }
8422 #endif
8423
8424 static struct net_device_stats *tg3_get_stats(struct net_device *);
8425 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8426
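/* Stop NAPI and any queued reset work, kill the timer, halt the chip,
 * release the IRQ (and MSI vector), snapshot the hardware counters into
 * the *_prev copies so statistics survive a close/open cycle, free the
 * DMA rings and put the device into D3hot.
 */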
8427 static int tg3_close(struct net_device *dev)
8428 {
8429         struct tg3 *tp = netdev_priv(dev);
8430
8431         napi_disable(&tp->napi);
8432         cancel_work_sync(&tp->reset_task);
8433
8434         netif_stop_queue(dev);
8435
8436         del_timer_sync(&tp->timer);
8437
8438         tg3_full_lock(tp, 1);
8439 #if 0
8440         tg3_dump_state(tp);
8441 #endif
8442
8443         tg3_disable_ints(tp);
8444
8445         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8446         tg3_free_rings(tp);
8447         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8448
8449         tg3_full_unlock(tp);
8450
8451         free_irq(tp->pdev->irq, dev);
8452         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8453                 pci_disable_msi(tp->pdev);
8454                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8455         }
8456
8457         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8458                sizeof(tp->net_stats_prev));
8459         memcpy(&tp->estats_prev, tg3_get_estats(tp),
8460                sizeof(tp->estats_prev));
8461
8462         tg3_free_consistent(tp);
8463
8464         tg3_set_power_state(tp, PCI_D3hot);
8465
8466         netif_carrier_off(tp->dev);
8467
8468         return 0;
8469 }
8470
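/* Hardware counters are kept as 64-bit high/low word pairs.  On 32-bit
 * hosts get_stat64() can only return the low word in an unsigned long;
 * get_estat64() always returns the full 64-bit value for ethtool.
 */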
8471 static inline unsigned long get_stat64(tg3_stat64_t *val)
8472 {
8473         unsigned long ret;
8474
8475 #if (BITS_PER_LONG == 32)
8476         ret = val->low;
8477 #else
8478         ret = ((u64)val->high << 32) | ((u64)val->low);
8479 #endif
8480         return ret;
8481 }
8482
8483 static inline u64 get_estat64(tg3_stat64_t *val)
8484 {
8485        return ((u64)val->high << 32) | ((u64)val->low);
8486 }
8487
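/* On 5700/5701 copper devices the CRC error count comes from the PHY:
 * with MII_TG3_TEST1_CRC_EN set, PHY register 0x14 reports a count that
 * is cleared by the read, so it is accumulated into tp->phy_crc_errors.
 * All other devices use the MAC's rx_fcs_errors statistic.
 */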
8488 static unsigned long calc_crc_errors(struct tg3 *tp)
8489 {
8490         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8491
8492         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8493             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8494              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8495                 u32 val;
8496
8497                 spin_lock_bh(&tp->lock);
8498                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8499                         tg3_writephy(tp, MII_TG3_TEST1,
8500                                      val | MII_TG3_TEST1_CRC_EN);
8501                         tg3_readphy(tp, 0x14, &val);
8502                 } else
8503                         val = 0;
8504                 spin_unlock_bh(&tp->lock);
8505
8506                 tp->phy_crc_errors += val;
8507
8508                 return tp->phy_crc_errors;
8509         }
8510
8511         return get_stat64(&hw_stats->rx_fcs_errors);
8512 }
8513
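/* Each ethtool counter is the value snapshotted at the last close
 * (estats_prev) plus whatever the hardware has counted since, so the
 * totals keep growing across close/open cycles.
 */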
8514 #define ESTAT_ADD(member) \
8515         estats->member =        old_estats->member + \
8516                                 get_estat64(&hw_stats->member)
8517
8518 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8519 {
8520         struct tg3_ethtool_stats *estats = &tp->estats;
8521         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8522         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8523
8524         if (!hw_stats)
8525                 return old_estats;
8526
8527         ESTAT_ADD(rx_octets);
8528         ESTAT_ADD(rx_fragments);
8529         ESTAT_ADD(rx_ucast_packets);
8530         ESTAT_ADD(rx_mcast_packets);
8531         ESTAT_ADD(rx_bcast_packets);
8532         ESTAT_ADD(rx_fcs_errors);
8533         ESTAT_ADD(rx_align_errors);
8534         ESTAT_ADD(rx_xon_pause_rcvd);
8535         ESTAT_ADD(rx_xoff_pause_rcvd);
8536         ESTAT_ADD(rx_mac_ctrl_rcvd);
8537         ESTAT_ADD(rx_xoff_entered);
8538         ESTAT_ADD(rx_frame_too_long_errors);
8539         ESTAT_ADD(rx_jabbers);
8540         ESTAT_ADD(rx_undersize_packets);
8541         ESTAT_ADD(rx_in_length_errors);
8542         ESTAT_ADD(rx_out_length_errors);
8543         ESTAT_ADD(rx_64_or_less_octet_packets);
8544         ESTAT_ADD(rx_65_to_127_octet_packets);
8545         ESTAT_ADD(rx_128_to_255_octet_packets);
8546         ESTAT_ADD(rx_256_to_511_octet_packets);
8547         ESTAT_ADD(rx_512_to_1023_octet_packets);
8548         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8549         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8550         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8551         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8552         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8553
8554         ESTAT_ADD(tx_octets);
8555         ESTAT_ADD(tx_collisions);
8556         ESTAT_ADD(tx_xon_sent);
8557         ESTAT_ADD(tx_xoff_sent);
8558         ESTAT_ADD(tx_flow_control);
8559         ESTAT_ADD(tx_mac_errors);
8560         ESTAT_ADD(tx_single_collisions);
8561         ESTAT_ADD(tx_mult_collisions);
8562         ESTAT_ADD(tx_deferred);
8563         ESTAT_ADD(tx_excessive_collisions);
8564         ESTAT_ADD(tx_late_collisions);
8565         ESTAT_ADD(tx_collide_2times);
8566         ESTAT_ADD(tx_collide_3times);
8567         ESTAT_ADD(tx_collide_4times);
8568         ESTAT_ADD(tx_collide_5times);
8569         ESTAT_ADD(tx_collide_6times);
8570         ESTAT_ADD(tx_collide_7times);
8571         ESTAT_ADD(tx_collide_8times);
8572         ESTAT_ADD(tx_collide_9times);
8573         ESTAT_ADD(tx_collide_10times);
8574         ESTAT_ADD(tx_collide_11times);
8575         ESTAT_ADD(tx_collide_12times);
8576         ESTAT_ADD(tx_collide_13times);
8577         ESTAT_ADD(tx_collide_14times);
8578         ESTAT_ADD(tx_collide_15times);
8579         ESTAT_ADD(tx_ucast_packets);
8580         ESTAT_ADD(tx_mcast_packets);
8581         ESTAT_ADD(tx_bcast_packets);
8582         ESTAT_ADD(tx_carrier_sense_errors);
8583         ESTAT_ADD(tx_discards);
8584         ESTAT_ADD(tx_errors);
8585
8586         ESTAT_ADD(dma_writeq_full);
8587         ESTAT_ADD(dma_write_prioq_full);
8588         ESTAT_ADD(rxbds_empty);
8589         ESTAT_ADD(rx_discards);
8590         ESTAT_ADD(rx_errors);
8591         ESTAT_ADD(rx_threshold_hit);
8592
8593         ESTAT_ADD(dma_readq_full);
8594         ESTAT_ADD(dma_read_prioq_full);
8595         ESTAT_ADD(tx_comp_queue_full);
8596
8597         ESTAT_ADD(ring_set_send_prod_index);
8598         ESTAT_ADD(ring_status_update);
8599         ESTAT_ADD(nic_irqs);
8600         ESTAT_ADD(nic_avoided_irqs);
8601         ESTAT_ADD(nic_tx_threshold_hit);
8602
8603         return estats;
8604 }
8605
8606 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8607 {
8608         struct tg3 *tp = netdev_priv(dev);
8609         struct net_device_stats *stats = &tp->net_stats;
8610         struct net_device_stats *old_stats = &tp->net_stats_prev;
8611         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8612
8613         if (!hw_stats)
8614                 return old_stats;
8615
8616         stats->rx_packets = old_stats->rx_packets +
8617                 get_stat64(&hw_stats->rx_ucast_packets) +
8618                 get_stat64(&hw_stats->rx_mcast_packets) +
8619                 get_stat64(&hw_stats->rx_bcast_packets);
8620
8621         stats->tx_packets = old_stats->tx_packets +
8622                 get_stat64(&hw_stats->tx_ucast_packets) +
8623                 get_stat64(&hw_stats->tx_mcast_packets) +
8624                 get_stat64(&hw_stats->tx_bcast_packets);
8625
8626         stats->rx_bytes = old_stats->rx_bytes +
8627                 get_stat64(&hw_stats->rx_octets);
8628         stats->tx_bytes = old_stats->tx_bytes +
8629                 get_stat64(&hw_stats->tx_octets);
8630
8631         stats->rx_errors = old_stats->rx_errors +
8632                 get_stat64(&hw_stats->rx_errors);
8633         stats->tx_errors = old_stats->tx_errors +
8634                 get_stat64(&hw_stats->tx_errors) +
8635                 get_stat64(&hw_stats->tx_mac_errors) +
8636                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8637                 get_stat64(&hw_stats->tx_discards);
8638
8639         stats->multicast = old_stats->multicast +
8640                 get_stat64(&hw_stats->rx_mcast_packets);
8641         stats->collisions = old_stats->collisions +
8642                 get_stat64(&hw_stats->tx_collisions);
8643
8644         stats->rx_length_errors = old_stats->rx_length_errors +
8645                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8646                 get_stat64(&hw_stats->rx_undersize_packets);
8647
8648         stats->rx_over_errors = old_stats->rx_over_errors +
8649                 get_stat64(&hw_stats->rxbds_empty);
8650         stats->rx_frame_errors = old_stats->rx_frame_errors +
8651                 get_stat64(&hw_stats->rx_align_errors);
8652         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8653                 get_stat64(&hw_stats->tx_discards);
8654         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8655                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8656
8657         stats->rx_crc_errors = old_stats->rx_crc_errors +
8658                 calc_crc_errors(tp);
8659
8660         stats->rx_missed_errors = old_stats->rx_missed_errors +
8661                 get_stat64(&hw_stats->rx_discards);
8662
8663         return stats;
8664 }
8665
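/* Compute the Ethernet CRC-32 of 'buf' bit-serially (LSB first,
 * reflected polynomial 0xedb88320, final complement).  __tg3_set_rx_mode()
 * uses it to hash multicast addresses into the 128-bit hash filter.
 */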
8666 static inline u32 calc_crc(unsigned char *buf, int len)
8667 {
8668         u32 reg;
8669         u32 tmp;
8670         int j, k;
8671
8672         reg = 0xffffffff;
8673
8674         for (j = 0; j < len; j++) {
8675                 reg ^= buf[j];
8676
8677                 for (k = 0; k < 8; k++) {
8678                         tmp = reg & 0x01;
8679
8680                         reg >>= 1;
8681
8682                         if (tmp) {
8683                                 reg ^= 0xedb88320;
8684                         }
8685                 }
8686         }
8687
8688         return ~reg;
8689 }
8690
8691 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8692 {
8693         /* accept or reject all multicast frames */
8694         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8695         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8696         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8697         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8698 }
8699
8700 static void __tg3_set_rx_mode(struct net_device *dev)
8701 {
8702         struct tg3 *tp = netdev_priv(dev);
8703         u32 rx_mode;
8704
8705         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8706                                   RX_MODE_KEEP_VLAN_TAG);
8707
8708         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8709          * flag clear.
8710          */
8711 #if TG3_VLAN_TAG_USED
8712         if (!tp->vlgrp &&
8713             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8714                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8715 #else
8716         /* By definition, VLAN is always disabled in this
8717          * case.
8718          */
8719         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8720                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8721 #endif
8722
8723         if (dev->flags & IFF_PROMISC) {
8724                 /* Promiscuous mode. */
8725                 rx_mode |= RX_MODE_PROMISC;
8726         } else if (dev->flags & IFF_ALLMULTI) {
8727                 /* Accept all multicast. */
8728                 tg3_set_multi(tp, 1);
8729         } else if (dev->mc_count < 1) {
8730                 /* Reject all multicast. */
8731                 tg3_set_multi(tp, 0);
8732         } else {
8733                 /* Accept one or more multicast(s). */
8734                 struct dev_mc_list *mclist;
8735                 unsigned int i;
8736                 u32 mc_filter[4] = { 0, };
8737                 u32 regidx;
8738                 u32 bit;
8739                 u32 crc;
8740
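                /* Hash each address into one of 128 filter bits: the low
                 * seven bits of the inverted CRC select the bit, the top
                 * two of those pick the MAC_HASH register and the bottom
                 * five the bit within it.
                 */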
8741                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8742                      i++, mclist = mclist->next) {
8743
8744                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8745                         bit = ~crc & 0x7f;
8746                         regidx = (bit & 0x60) >> 5;
8747                         bit &= 0x1f;
8748                         mc_filter[regidx] |= (1 << bit);
8749                 }
8750
8751                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8752                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8753                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8754                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8755         }
8756
8757         if (rx_mode != tp->rx_mode) {
8758                 tp->rx_mode = rx_mode;
8759                 tw32_f(MAC_RX_MODE, rx_mode);
8760                 udelay(10);
8761         }
8762 }
8763
8764 static void tg3_set_rx_mode(struct net_device *dev)
8765 {
8766         struct tg3 *tp = netdev_priv(dev);
8767
8768         if (!netif_running(dev))
8769                 return;
8770
8771         tg3_full_lock(tp, 0);
8772         __tg3_set_rx_mode(dev);
8773         tg3_full_unlock(tp);
8774 }
8775
8776 #define TG3_REGDUMP_LEN         (32 * 1024)
8777
8778 static int tg3_get_regs_len(struct net_device *dev)
8779 {
8780         return TG3_REGDUMP_LEN;
8781 }
8782
8783 static void tg3_get_regs(struct net_device *dev,
8784                 struct ethtool_regs *regs, void *_p)
8785 {
8786         u32 *p = _p;
8787         struct tg3 *tp = netdev_priv(dev);
8788         u8 *orig_p = _p;
8789         int i;
8790
8791         regs->version = 0;
8792
8793         memset(p, 0, TG3_REGDUMP_LEN);
8794
8795         if (tp->link_config.phy_is_low_power)
8796                 return;
8797
8798         tg3_full_lock(tp, 0);
8799
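/* Copy registers into the dump buffer at offsets equal to their register
 * addresses, so the 32k ethtool register dump is a sparse image of the
 * chip's register space.
 */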
8800 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8801 #define GET_REG32_LOOP(base,len)                \
8802 do {    p = (u32 *)(orig_p + (base));           \
8803         for (i = 0; i < len; i += 4)            \
8804                 __GET_REG32((base) + i);        \
8805 } while (0)
8806 #define GET_REG32_1(reg)                        \
8807 do {    p = (u32 *)(orig_p + (reg));            \
8808         __GET_REG32((reg));                     \
8809 } while (0)
8810
8811         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8812         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8813         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8814         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8815         GET_REG32_1(SNDDATAC_MODE);
8816         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8817         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8818         GET_REG32_1(SNDBDC_MODE);
8819         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8820         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8821         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8822         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8823         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8824         GET_REG32_1(RCVDCC_MODE);
8825         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8826         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8827         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8828         GET_REG32_1(MBFREE_MODE);
8829         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8830         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8831         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8832         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8833         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8834         GET_REG32_1(RX_CPU_MODE);
8835         GET_REG32_1(RX_CPU_STATE);
8836         GET_REG32_1(RX_CPU_PGMCTR);
8837         GET_REG32_1(RX_CPU_HWBKPT);
8838         GET_REG32_1(TX_CPU_MODE);
8839         GET_REG32_1(TX_CPU_STATE);
8840         GET_REG32_1(TX_CPU_PGMCTR);
8841         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8842         GET_REG32_LOOP(FTQ_RESET, 0x120);
8843         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8844         GET_REG32_1(DMAC_MODE);
8845         GET_REG32_LOOP(GRC_MODE, 0x4c);
8846         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8847                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8848
8849 #undef __GET_REG32
8850 #undef GET_REG32_LOOP
8851 #undef GET_REG32_1
8852
8853         tg3_full_unlock(tp);
8854 }
8855
8856 static int tg3_get_eeprom_len(struct net_device *dev)
8857 {
8858         struct tg3 *tp = netdev_priv(dev);
8859
8860         return tp->nvram_size;
8861 }
8862
8863 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8864 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8865 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8866
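/* NVRAM is read 32 bits at a time on 4-byte boundaries, so the request
 * is split into an unaligned head, an aligned middle and an unaligned
 * tail.  eeprom->len is advanced as bytes are copied, so a failure part
 * way through still reports how much was read.
 */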
8867 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8868 {
8869         struct tg3 *tp = netdev_priv(dev);
8870         int ret;
8871         u8  *pd;
8872         u32 i, offset, len, b_offset, b_count;
8873         __le32 val;
8874
8875         if (tp->link_config.phy_is_low_power)
8876                 return -EAGAIN;
8877
8878         offset = eeprom->offset;
8879         len = eeprom->len;
8880         eeprom->len = 0;
8881
8882         eeprom->magic = TG3_EEPROM_MAGIC;
8883
8884         if (offset & 3) {
8885                 /* adjustments to start on required 4 byte boundary */
8886                 b_offset = offset & 3;
8887                 b_count = 4 - b_offset;
8888                 if (b_count > len) {
8889                         /* i.e. offset=1 len=2 */
8890                         b_count = len;
8891                 }
8892                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8893                 if (ret)
8894                         return ret;
8895                 memcpy(data, ((char*)&val) + b_offset, b_count);
8896                 len -= b_count;
8897                 offset += b_count;
8898                 eeprom->len += b_count;
8899         }
8900
8901         /* read bytes up to the last 4-byte boundary */
8902         pd = &data[eeprom->len];
8903         for (i = 0; i < (len - (len & 3)); i += 4) {
8904                 ret = tg3_nvram_read_le(tp, offset + i, &val);
8905                 if (ret) {
8906                         eeprom->len += i;
8907                         return ret;
8908                 }
8909                 memcpy(pd + i, &val, 4);
8910         }
8911         eeprom->len += i;
8912
8913         if (len & 3) {
8914                 /* read last bytes not ending on 4 byte boundary */
8915                 pd = &data[eeprom->len];
8916                 b_count = len & 3;
8917                 b_offset = offset + len - b_count;
8918                 ret = tg3_nvram_read_le(tp, b_offset, &val);
8919                 if (ret)
8920                         return ret;
8921                 memcpy(pd, &val, b_count);
8922                 eeprom->len += b_count;
8923         }
8924         return 0;
8925 }
8926
8927 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8928
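/* NVRAM writes must also cover whole 4-byte words.  If the request
 * starts or ends off a word boundary, the neighbouring words are read
 * first and merged into a temporary buffer so the surrounding bytes
 * are preserved (read-modify-write).
 */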
8929 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8930 {
8931         struct tg3 *tp = netdev_priv(dev);
8932         int ret;
8933         u32 offset, len, b_offset, odd_len;
8934         u8 *buf;
8935         __le32 start, end;
8936
8937         if (tp->link_config.phy_is_low_power)
8938                 return -EAGAIN;
8939
8940         if (eeprom->magic != TG3_EEPROM_MAGIC)
8941                 return -EINVAL;
8942
8943         offset = eeprom->offset;
8944         len = eeprom->len;
8945
8946         if ((b_offset = (offset & 3))) {
8947                 /* adjustments to start on required 4 byte boundary */
8948                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8949                 if (ret)
8950                         return ret;
8951                 len += b_offset;
8952                 offset &= ~3;
8953                 if (len < 4)
8954                         len = 4;
8955         }
8956
8957         odd_len = 0;
8958         if (len & 3) {
8959                 /* adjustments to end on required 4 byte boundary */
8960                 odd_len = 1;
8961                 len = (len + 3) & ~3;
8962                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8963                 if (ret)
8964                         return ret;
8965         }
8966
8967         buf = data;
8968         if (b_offset || odd_len) {
8969                 buf = kmalloc(len, GFP_KERNEL);
8970                 if (!buf)
8971                         return -ENOMEM;
8972                 if (b_offset)
8973                         memcpy(buf, &start, 4);
8974                 if (odd_len)
8975                         memcpy(buf+len-4, &end, 4);
8976                 memcpy(buf + b_offset, data, eeprom->len);
8977         }
8978
8979         ret = tg3_nvram_write_block(tp, offset, len, buf);
8980
8981         if (buf != data)
8982                 kfree(buf);
8983
8984         return ret;
8985 }
8986
8987 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8988 {
8989         struct tg3 *tp = netdev_priv(dev);
8990
8991         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8992                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8993                         return -EAGAIN;
8994                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8995         }
8996
8997         cmd->supported = (SUPPORTED_Autoneg);
8998
8999         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9000                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9001                                    SUPPORTED_1000baseT_Full);
9002
9003         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9004                 cmd->supported |= (SUPPORTED_100baseT_Half |
9005                                   SUPPORTED_100baseT_Full |
9006                                   SUPPORTED_10baseT_Half |
9007                                   SUPPORTED_10baseT_Full |
9008                                   SUPPORTED_TP);
9009                 cmd->port = PORT_TP;
9010         } else {
9011                 cmd->supported |= SUPPORTED_FIBRE;
9012                 cmd->port = PORT_FIBRE;
9013         }
9014
9015         cmd->advertising = tp->link_config.advertising;
9016         if (netif_running(dev)) {
9017                 cmd->speed = tp->link_config.active_speed;
9018                 cmd->duplex = tp->link_config.active_duplex;
9019         }
9020         cmd->phy_address = PHY_ADDR;
9021         cmd->transceiver = 0;
9022         cmd->autoneg = tp->link_config.autoneg;
9023         cmd->maxtxpkt = 0;
9024         cmd->maxrxpkt = 0;
9025         return 0;
9026 }
9027
9028 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9029 {
9030         struct tg3 *tp = netdev_priv(dev);
9031
9032         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9033                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9034                         return -EAGAIN;
9035                 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9036         }
9037
9038         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9039                 /* These are the only valid advertisement bits allowed.  */
9040                 if (cmd->autoneg == AUTONEG_ENABLE &&
9041                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9042                                           ADVERTISED_1000baseT_Full |
9043                                           ADVERTISED_Autoneg |
9044                                           ADVERTISED_FIBRE)))
9045                         return -EINVAL;
9046                 /* Fiber can only do SPEED_1000.  */
9047                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9048                          (cmd->speed != SPEED_1000))
9049                         return -EINVAL;
9050         /* Copper cannot force SPEED_1000.  */
9051         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9052                    (cmd->speed == SPEED_1000))
9053                 return -EINVAL;
9054         else if ((cmd->speed == SPEED_1000) &&
9055                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9056                 return -EINVAL;
9057
9058         tg3_full_lock(tp, 0);
9059
9060         tp->link_config.autoneg = cmd->autoneg;
9061         if (cmd->autoneg == AUTONEG_ENABLE) {
9062                 tp->link_config.advertising = (cmd->advertising |
9063                                               ADVERTISED_Autoneg);
9064                 tp->link_config.speed = SPEED_INVALID;
9065                 tp->link_config.duplex = DUPLEX_INVALID;
9066         } else {
9067                 tp->link_config.advertising = 0;
9068                 tp->link_config.speed = cmd->speed;
9069                 tp->link_config.duplex = cmd->duplex;
9070         }
9071
9072         tp->link_config.orig_speed = tp->link_config.speed;
9073         tp->link_config.orig_duplex = tp->link_config.duplex;
9074         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9075
9076         if (netif_running(dev))
9077                 tg3_setup_phy(tp, 1);
9078
9079         tg3_full_unlock(tp);
9080
9081         return 0;
9082 }
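/*
 * Annotation (illustrative sketch, not part of the driver): the two
 * handlers above are reached from user space through the SIOCETHTOOL
 * ioctl, carrying ETHTOOL_GSET and ETHTOOL_SSET requests.  Assuming an
 * interface named "eth0" and the usual <linux/ethtool.h>,
 * <linux/sockios.h>, <net/if.h>, <sys/ioctl.h> headers:
 *
 *      struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 *      struct ifreq ifr;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&ecmd;
 *      ioctl(fd, SIOCETHTOOL, &ifr);           (read current settings)
 *
 *      ecmd.cmd = ETHTOOL_SSET;
 *      ecmd.autoneg = AUTONEG_DISABLE;
 *      ecmd.speed = SPEED_100;
 *      ecmd.duplex = DUPLEX_FULL;
 *      ioctl(fd, SIOCETHTOOL, &ifr);           (force 100/full)
 *
 * This is what "ethtool -s eth0 speed 100 duplex full autoneg off" does.
 * Per the checks in tg3_set_settings(), copper ports cannot be forced to
 * SPEED_1000 and fiber ports cannot be forced to anything else, so the
 * forced 100/full above assumes a copper, non-10/100-only device.
 */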
9083
9084 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9085 {
9086         struct tg3 *tp = netdev_priv(dev);
9087
9088         strcpy(info->driver, DRV_MODULE_NAME);
9089         strcpy(info->version, DRV_MODULE_VERSION);
9090         strcpy(info->fw_version, tp->fw_ver);
9091         strcpy(info->bus_info, pci_name(tp->pdev));
9092 }
9093
9094 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9095 {
9096         struct tg3 *tp = netdev_priv(dev);
9097
9098         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9099             device_can_wakeup(&tp->pdev->dev))
9100                 wol->supported = WAKE_MAGIC;
9101         else
9102                 wol->supported = 0;
9103         wol->wolopts = 0;
9104         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9105             device_can_wakeup(&tp->pdev->dev))
9106                 wol->wolopts = WAKE_MAGIC;
9107         memset(&wol->sopass, 0, sizeof(wol->sopass));
9108 }
9109
9110 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9111 {
9112         struct tg3 *tp = netdev_priv(dev);
9113         struct device *dp = &tp->pdev->dev;
9114
9115         if (wol->wolopts & ~WAKE_MAGIC)
9116                 return -EINVAL;
9117         if ((wol->wolopts & WAKE_MAGIC) &&
9118             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9119                 return -EINVAL;
9120
9121         spin_lock_bh(&tp->lock);
9122         if (wol->wolopts & WAKE_MAGIC) {
9123                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9124                 device_set_wakeup_enable(dp, true);
9125         } else {
9126                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9127                 device_set_wakeup_enable(dp, false);
9128         }
9129         spin_unlock_bh(&tp->lock);
9130
9131         return 0;
9132 }
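/*
 * Annotation (illustrative sketch, not part of the driver): tg3_set_wol()
 * answers ETHTOOL_SWOL requests arriving over the SIOCETHTOOL pattern
 * shown after tg3_set_settings().  Enabling magic-packet wake-up:
 *
 *      struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL,
 *                                     .wolopts = WAKE_MAGIC };
 *
 *      ifr.ifr_data = (void *)&wol;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * which is what "ethtool -s eth0 wol g" sends.  Only WAKE_MAGIC is
 * accepted: any other wolopts bit, or WAKE_MAGIC on a device that is not
 * WOL-capable or cannot wake the system, returns -EINVAL.
 */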
9133
9134 static u32 tg3_get_msglevel(struct net_device *dev)
9135 {
9136         struct tg3 *tp = netdev_priv(dev);
9137         return tp->msg_enable;
9138 }
9139
9140 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9141 {
9142         struct tg3 *tp = netdev_priv(dev);
9143         tp->msg_enable = value;
9144 }
9145
9146 static int tg3_set_tso(struct net_device *dev, u32 value)
9147 {
9148         struct tg3 *tp = netdev_priv(dev);
9149
9150         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9151                 if (value)
9152                         return -EINVAL;
9153                 return 0;
9154         }
9155         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9156             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9157                 if (value) {
9158                         dev->features |= NETIF_F_TSO6;
9159                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9160                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9161                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9162                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9163                                 dev->features |= NETIF_F_TSO_ECN;
9164                 } else
9165                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9166         }
9167         return ethtool_op_set_tso(dev, value);
9168 }
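/*
 * Annotation (illustrative sketch, not part of the driver): TSO is toggled
 * with an ETHTOOL_STSO request, i.e. "ethtool -K eth0 tso on":
 *
 *      struct ethtool_value ev = { .cmd = ETHTOOL_STSO, .data = 1 };
 *      ifr.ifr_data = (void *)&ev;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * As the code above shows, enabling TSO on HW_TSO_2 parts other than the
 * 5906 also sets NETIF_F_TSO6, and additionally NETIF_F_TSO_ECN on the
 * 5761, 5785 and non-AX 5784; disabling TSO clears both flags again.
 */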
9169
9170 static int tg3_nway_reset(struct net_device *dev)
9171 {
9172         struct tg3 *tp = netdev_priv(dev);
9173         int r;
9174
9175         if (!netif_running(dev))
9176                 return -EAGAIN;
9177
9178         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9179                 return -EINVAL;
9180
9181         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9182                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9183                         return -EAGAIN;
9184                 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9185         } else {
9186                 u32 bmcr;
9187
9188                 spin_lock_bh(&tp->lock);
9189                 r = -EINVAL;
9190                 tg3_readphy(tp, MII_BMCR, &bmcr);
9191                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9192                     ((bmcr & BMCR_ANENABLE) ||
9193                      (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9194                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9195                                                    BMCR_ANENABLE);
9196                         r = 0;
9197                 }
9198                 spin_unlock_bh(&tp->lock);
9199         }
9200
9201         return r;
9202 }
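/*
 * Annotation (illustrative sketch, not part of the driver): restarting
 * autonegotiation ("ethtool -r eth0") arrives as ETHTOOL_NWAY_RST:
 *
 *      struct ethtool_value ev = { .cmd = ETHTOOL_NWAY_RST };
 *      ifr.ifr_data = (void *)&ev;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * tg3_nway_reset() returns -EAGAIN while the interface is down (or the
 * phylib PHY is not yet connected), -EINVAL on SERDES devices, and in the
 * legacy path -EINVAL when autonegotiation is disabled and parallel
 * detection is not active.
 */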
9203
9204 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9205 {
9206         struct tg3 *tp = netdev_priv(dev);
9207
9208         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9209         ering->rx_mini_max_pending = 0;
9210         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9211                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9212         else
9213                 ering->rx_jumbo_max_pending = 0;
9214
9215         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9216
9217         ering->rx_pending = tp->rx_pending;
9218         ering->rx_mini_pending = 0;
9219         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9220                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9221         else
9222                 ering->rx_jumbo_pending = 0;
9223
9224         ering->tx_pending = tp->tx_pending;
9225 }
9226
9227 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9228 {
9229         struct tg3 *tp = netdev_priv(dev);
9230         int irq_sync = 0, err = 0;
9231
9232         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9233             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9234             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9235             (ering->tx_pending <= MAX_SKB_FRAGS) ||
9236             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9237              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9238                 return -EINVAL;
9239
9240         if (netif_running(dev)) {
9241                 tg3_phy_stop(tp);
9242                 tg3_netif_stop(tp);
9243                 irq_sync = 1;
9244         }
9245
9246         tg3_full_lock(tp, irq_sync);
9247
9248         tp->rx_pending = ering->rx_pending;
9249
9250         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9251             tp->rx_pending > 63)
9252                 tp->rx_pending = 63;
9253         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9254         tp->tx_pending = ering->tx_pending;
9255
9256         if (netif_running(dev)) {
9257                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9258                 err = tg3_restart_hw(tp, 1);
9259                 if (!err)
9260                         tg3_netif_start(tp);
9261         }
9262
9263         tg3_full_unlock(tp);
9264
9265         if (irq_sync && !err)
9266                 tg3_phy_start(tp);
9267
9268         return err;
9269 }
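/*
 * Annotation (illustrative sketch, not part of the driver): ring sizes are
 * changed through ETHTOOL_SRINGPARAM, e.g. "ethtool -G eth0 rx 200 tx 511".
 * The usual sequence reads the current values first:
 *
 *      struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };
 *      ifr.ifr_data = (void *)&ering;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *      ering.cmd = ETHTOOL_SRINGPARAM;
 *      ering.rx_pending = 200;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * tg3_set_ringparam() rejects rx/jumbo/tx values at or above the fixed
 * ring sizes, rejects tx_pending at or below MAX_SKB_FRAGS (three times
 * that on TSO_BUG parts), silently caps rx_pending at 63 on
 * MAX_RXPEND_64 chips, and halts/restarts the hardware if the interface
 * is running.
 */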
9270
9271 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9272 {
9273         struct tg3 *tp = netdev_priv(dev);
9274
9275         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9276
9277         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9278                 epause->rx_pause = 1;
9279         else
9280                 epause->rx_pause = 0;
9281
9282         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9283                 epause->tx_pause = 1;
9284         else
9285                 epause->tx_pause = 0;
9286 }
9287
9288 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9289 {
9290         struct tg3 *tp = netdev_priv(dev);
9291         int err = 0;
9292
9293         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9294                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9295                         return -EAGAIN;
9296
9297                 if (epause->autoneg) {
9298                         u32 newadv;
9299                         struct phy_device *phydev;
9300
9301                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9302
9303                         if (epause->rx_pause) {
9304                                 if (epause->tx_pause)
9305                                         newadv = ADVERTISED_Pause;
9306                                 else
9307                                         newadv = ADVERTISED_Pause |
9308                                                  ADVERTISED_Asym_Pause;
9309                         } else if (epause->tx_pause) {
9310                                 newadv = ADVERTISED_Asym_Pause;
9311                         } else
9312                                 newadv = 0;
9313
9314                         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9315                                 u32 oldadv = phydev->advertising &
9316                                              (ADVERTISED_Pause |
9317                                               ADVERTISED_Asym_Pause);
9318                                 if (oldadv != newadv) {
9319                                         phydev->advertising &=
9320                                                 ~(ADVERTISED_Pause |
9321                                                   ADVERTISED_Asym_Pause);
9322                                         phydev->advertising |= newadv;
9323                                         err = phy_start_aneg(phydev);
9324                                 }
9325                         } else {
9326                                 tp->link_config.advertising &=
9327                                                 ~(ADVERTISED_Pause |
9328                                                   ADVERTISED_Asym_Pause);
9329                                 tp->link_config.advertising |= newadv;
9330                         }
9331                 } else {
9332                         if (epause->rx_pause)
9333                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9334                         else
9335                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9336
9337                         if (epause->tx_pause)
9338                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9339                         else
9340                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9341
9342                         if (netif_running(dev))
9343                                 tg3_setup_flow_control(tp, 0, 0);
9344                 }
9345         } else {
9346                 int irq_sync = 0;
9347
9348                 if (netif_running(dev)) {
9349                         tg3_netif_stop(tp);
9350                         irq_sync = 1;
9351                 }
9352
9353                 tg3_full_lock(tp, irq_sync);
9354
9355                 if (epause->autoneg)
9356                         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9357                 else
9358                         tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9359                 if (epause->rx_pause)
9360                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9361                 else
9362                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9363                 if (epause->tx_pause)
9364                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9365                 else
9366                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9367
9368                 if (netif_running(dev)) {
9369                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9370                         err = tg3_restart_hw(tp, 1);
9371                         if (!err)
9372                                 tg3_netif_start(tp);
9373                 }
9374
9375                 tg3_full_unlock(tp);
9376         }
9377
9378         return err;
9379 }
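/*
 * Annotation (illustrative sketch, not part of the driver): pause/flow
 * control is configured with ETHTOOL_SPAUSEPARAM, as in
 * "ethtool -A eth0 autoneg on rx on tx on":
 *
 *      struct ethtool_pauseparam pause = { .cmd = ETHTOOL_SPAUSEPARAM,
 *                                          .autoneg = 1,
 *                                          .rx_pause = 1,
 *                                          .tx_pause = 1 };
 *      ifr.ifr_data = (void *)&pause;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * With phylib in charge, the handler folds the request into the PHY's
 * Pause/Asym_Pause advertisement and restarts autonegotiation if those
 * bits changed, or programs flow control directly when autoneg is off.
 * On the legacy path it updates TG3_FLAG_PAUSE_AUTONEG and
 * link_config.flowctrl, then halts and restarts the hardware if the
 * interface is up.
 */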
9380
9381 static u32 tg3_get_rx_csum(struct net_device *dev)
9382 {
9383         struct tg3 *tp = netdev_priv(dev);
9384         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9385 }
9386
9387 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9388 {
9389         struct tg3 *tp = netdev_priv(dev);
9390
9391         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9392                 if (data != 0)
9393                         return -EINVAL;
9394                 return 0;
9395         }
9396
9397         spin_lock_bh(&tp->lock);
9398         if (data)
9399                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9400         else
9401                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9402         spin_unlock_bh(&tp->lock);
9403
9404         return 0;
9405 }
9406
9407 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9408 {
9409         struct tg3 *tp = netdev_priv(dev);
9410
9411         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9412                 if (data != 0)
9413                         return -EINVAL;
9414                 return 0;
9415         }
9416
9417         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9418             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9419             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9420             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9422                 ethtool_op_set_tx_ipv6_csum(dev, data);
9423         else
9424                 ethtool_op_set_tx_csum(dev, data);
9425
9426         return 0;
9427 }
9428
9429 static int tg3_get_sset_count (struct net_device *dev, int sset)
9430 {
9431         switch (sset) {
9432         case ETH_SS_TEST:
9433                 return TG3_NUM_TEST;
9434         case ETH_SS_STATS:
9435                 return TG3_NUM_STATS;
9436         default:
9437                 return -EOPNOTSUPP;
9438         }
9439 }
9440
9441 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9442 {
9443         switch (stringset) {
9444         case ETH_SS_STATS:
9445                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9446                 break;
9447         case ETH_SS_TEST:
9448                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9449                 break;
9450         default:
9451                 WARN_ON(1);     /* we need a WARN() */
9452                 break;
9453         }
9454 }
9455
9456 static int tg3_phys_id(struct net_device *dev, u32 data)
9457 {
9458         struct tg3 *tp = netdev_priv(dev);
9459         int i;
9460
9461         if (!netif_running(tp->dev))
9462                 return -EAGAIN;
9463
9464         if (data == 0)
9465                 data = UINT_MAX / 2;
9466
9467         for (i = 0; i < (data * 2); i++) {
9468                 if ((i % 2) == 0)
9469                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9470                                            LED_CTRL_1000MBPS_ON |
9471                                            LED_CTRL_100MBPS_ON |
9472                                            LED_CTRL_10MBPS_ON |
9473                                            LED_CTRL_TRAFFIC_OVERRIDE |
9474                                            LED_CTRL_TRAFFIC_BLINK |
9475                                            LED_CTRL_TRAFFIC_LED);
9476
9477                 else
9478                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9479                                            LED_CTRL_TRAFFIC_OVERRIDE);
9480
9481                 if (msleep_interruptible(500))
9482                         break;
9483         }
9484         tw32(MAC_LED_CTRL, tp->led_ctrl);
9485         return 0;
9486 }
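/*
 * Annotation (illustrative sketch, not part of the driver): "ethtool -p
 * eth0 5" issues ETHTOOL_PHYS_ID with data = 5, and tg3_phys_id() then
 * blinks the port LEDs for roughly five seconds (two half-second toggles
 * per second).  A value of 0 is mapped to UINT_MAX / 2 above, so the LEDs
 * keep blinking until the interruptible sleep is broken by a signal,
 * typically ethtool being interrupted with Ctrl-C:
 *
 *      struct ethtool_value ev = { .cmd = ETHTOOL_PHYS_ID, .data = 5 };
 *      ifr.ifr_data = (void *)&ev;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 */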
9487
9488 static void tg3_get_ethtool_stats (struct net_device *dev,
9489                                    struct ethtool_stats *estats, u64 *tmp_stats)
9490 {
9491         struct tg3 *tp = netdev_priv(dev);
9492         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9493 }
9494
9495 #define NVRAM_TEST_SIZE 0x100
9496 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9497 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9498 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9499 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9500 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9501
9502 static int tg3_test_nvram(struct tg3 *tp)
9503 {
9504         u32 csum, magic;
9505         __le32 *buf;
9506         int i, j, k, err = 0, size;
9507
9508         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9509                 return -EIO;
9510
9511         if (magic == TG3_EEPROM_MAGIC)
9512                 size = NVRAM_TEST_SIZE;
9513         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9514                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9515                     TG3_EEPROM_SB_FORMAT_1) {
9516                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9517                         case TG3_EEPROM_SB_REVISION_0:
9518                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9519                                 break;
9520                         case TG3_EEPROM_SB_REVISION_2:
9521                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9522                                 break;
9523                         case TG3_EEPROM_SB_REVISION_3:
9524                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9525                                 break;
9526                         default:
9527                                 return 0;
9528                         }
9529                 } else
9530                         return 0;
9531         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9532                 size = NVRAM_SELFBOOT_HW_SIZE;
9533         else
9534                 return -EIO;
9535
9536         buf = kmalloc(size, GFP_KERNEL);
9537         if (buf == NULL)
9538                 return -ENOMEM;
9539
9540         err = -EIO;
9541         for (i = 0, j = 0; i < size; i += 4, j++) {
9542                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9543                         break;
9544         }
9545         if (i < size)
9546                 goto out;
9547
9548         /* Selfboot format */
9549         magic = swab32(le32_to_cpu(buf[0]));
9550         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9551             TG3_EEPROM_MAGIC_FW) {
9552                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9553
9554                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9555                     TG3_EEPROM_SB_REVISION_2) {
9556                         /* For rev 2, the csum doesn't include the MBA. */
9557                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9558                                 csum8 += buf8[i];
9559                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9560                                 csum8 += buf8[i];
9561                 } else {
9562                         for (i = 0; i < size; i++)
9563                                 csum8 += buf8[i];
9564                 }
9565
9566                 if (csum8 == 0) {
9567                         err = 0;
9568                         goto out;
9569                 }
9570
9571                 err = -EIO;
9572                 goto out;
9573         }
9574
9575         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9576             TG3_EEPROM_MAGIC_HW) {
9577                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9578                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9579                 u8 *buf8 = (u8 *) buf;
9580
9581                 /* Separate the parity bits and the data bytes.  */
9582                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9583                         if ((i == 0) || (i == 8)) {
9584                                 int l;
9585                                 u8 msk;
9586
9587                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9588                                         parity[k++] = buf8[i] & msk;
9589                                 i++;
9590                         }
9591                         else if (i == 16) {
9592                                 int l;
9593                                 u8 msk;
9594
9595                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9596                                         parity[k++] = buf8[i] & msk;
9597                                 i++;
9598
9599                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9600                                         parity[k++] = buf8[i] & msk;
9601                                 i++;
9602                         }
9603                         data[j++] = buf8[i];
9604                 }
9605
9606                 err = -EIO;
9607                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9608                         u8 hw8 = hweight8(data[i]);
9609
9610                         if ((hw8 & 0x1) && parity[i])
9611                                 goto out;
9612                         else if (!(hw8 & 0x1) && !parity[i])
9613                                 goto out;
9614                 }
9615                 err = 0;
9616                 goto out;
9617         }
9618
9619         /* Bootstrap checksum at offset 0x10 */
9620         csum = calc_crc((unsigned char *) buf, 0x10);
9621         if (csum != le32_to_cpu(buf[0x10/4]))
9622                 goto out;
9623
9624         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9625         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9626         if (csum != le32_to_cpu(buf[0xfc/4]))
9627                 goto out;
9628
9629         err = 0;
9630
9631 out:
9632         kfree(buf);
9633         return err;
9634 }
9635
9636 #define TG3_SERDES_TIMEOUT_SEC  2
9637 #define TG3_COPPER_TIMEOUT_SEC  6
9638
9639 static int tg3_test_link(struct tg3 *tp)
9640 {
9641         int i, max;
9642
9643         if (!netif_running(tp->dev))
9644                 return -ENODEV;
9645
9646         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9647                 max = TG3_SERDES_TIMEOUT_SEC;
9648         else
9649                 max = TG3_COPPER_TIMEOUT_SEC;
9650
9651         for (i = 0; i < max; i++) {
9652                 if (netif_carrier_ok(tp->dev))
9653                         return 0;
9654
9655                 if (msleep_interruptible(1000))
9656                         break;
9657         }
9658
9659         return -EIO;
9660 }
9661
9662 /* Only test the commonly used registers */
9663 static int tg3_test_registers(struct tg3 *tp)
9664 {
9665         int i, is_5705, is_5750;
9666         u32 offset, read_mask, write_mask, val, save_val, read_val;
9667         static struct {
9668                 u16 offset;
9669                 u16 flags;
9670 #define TG3_FL_5705     0x1
9671 #define TG3_FL_NOT_5705 0x2
9672 #define TG3_FL_NOT_5788 0x4
9673 #define TG3_FL_NOT_5750 0x8
9674                 u32 read_mask;
9675                 u32 write_mask;
9676         } reg_tbl[] = {
9677                 /* MAC Control Registers */
9678                 { MAC_MODE, TG3_FL_NOT_5705,
9679                         0x00000000, 0x00ef6f8c },
9680                 { MAC_MODE, TG3_FL_5705,
9681                         0x00000000, 0x01ef6b8c },
9682                 { MAC_STATUS, TG3_FL_NOT_5705,
9683                         0x03800107, 0x00000000 },
9684                 { MAC_STATUS, TG3_FL_5705,
9685                         0x03800100, 0x00000000 },
9686                 { MAC_ADDR_0_HIGH, 0x0000,
9687                         0x00000000, 0x0000ffff },
9688                 { MAC_ADDR_0_LOW, 0x0000,
9689                         0x00000000, 0xffffffff },
9690                 { MAC_RX_MTU_SIZE, 0x0000,
9691                         0x00000000, 0x0000ffff },
9692                 { MAC_TX_MODE, 0x0000,
9693                         0x00000000, 0x00000070 },
9694                 { MAC_TX_LENGTHS, 0x0000,
9695                         0x00000000, 0x00003fff },
9696                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9697                         0x00000000, 0x000007fc },
9698                 { MAC_RX_MODE, TG3_FL_5705,
9699                         0x00000000, 0x000007dc },
9700                 { MAC_HASH_REG_0, 0x0000,
9701                         0x00000000, 0xffffffff },
9702                 { MAC_HASH_REG_1, 0x0000,
9703                         0x00000000, 0xffffffff },
9704                 { MAC_HASH_REG_2, 0x0000,
9705                         0x00000000, 0xffffffff },
9706                 { MAC_HASH_REG_3, 0x0000,
9707                         0x00000000, 0xffffffff },
9708
9709                 /* Receive Data and Receive BD Initiator Control Registers. */
9710                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9711                         0x00000000, 0xffffffff },
9712                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9713                         0x00000000, 0xffffffff },
9714                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9715                         0x00000000, 0x00000003 },
9716                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9717                         0x00000000, 0xffffffff },
9718                 { RCVDBDI_STD_BD+0, 0x0000,
9719                         0x00000000, 0xffffffff },
9720                 { RCVDBDI_STD_BD+4, 0x0000,
9721                         0x00000000, 0xffffffff },
9722                 { RCVDBDI_STD_BD+8, 0x0000,
9723                         0x00000000, 0xffff0002 },
9724                 { RCVDBDI_STD_BD+0xc, 0x0000,
9725                         0x00000000, 0xffffffff },
9726
9727                 /* Receive BD Initiator Control Registers. */
9728                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9729                         0x00000000, 0xffffffff },
9730                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9731                         0x00000000, 0x000003ff },
9732                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9733                         0x00000000, 0xffffffff },
9734
9735                 /* Host Coalescing Control Registers. */
9736                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9737                         0x00000000, 0x00000004 },
9738                 { HOSTCC_MODE, TG3_FL_5705,
9739                         0x00000000, 0x000000f6 },
9740                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9741                         0x00000000, 0xffffffff },
9742                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9743                         0x00000000, 0x000003ff },
9744                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9745                         0x00000000, 0xffffffff },
9746                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9747                         0x00000000, 0x000003ff },
9748                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9749                         0x00000000, 0xffffffff },
9750                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9751                         0x00000000, 0x000000ff },
9752                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9753                         0x00000000, 0xffffffff },
9754                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9755                         0x00000000, 0x000000ff },
9756                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9757                         0x00000000, 0xffffffff },
9758                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9759                         0x00000000, 0xffffffff },
9760                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9761                         0x00000000, 0xffffffff },
9762                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9763                         0x00000000, 0x000000ff },
9764                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9765                         0x00000000, 0xffffffff },
9766                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9767                         0x00000000, 0x000000ff },
9768                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9769                         0x00000000, 0xffffffff },
9770                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9771                         0x00000000, 0xffffffff },
9772                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9773                         0x00000000, 0xffffffff },
9774                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9775                         0x00000000, 0xffffffff },
9776                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9777                         0x00000000, 0xffffffff },
9778                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9779                         0xffffffff, 0x00000000 },
9780                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9781                         0xffffffff, 0x00000000 },
9782
9783                 /* Buffer Manager Control Registers. */
9784                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9785                         0x00000000, 0x007fff80 },
9786                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9787                         0x00000000, 0x007fffff },
9788                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9789                         0x00000000, 0x0000003f },
9790                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9791                         0x00000000, 0x000001ff },
9792                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9793                         0x00000000, 0x000001ff },
9794                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9795                         0xffffffff, 0x00000000 },
9796                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9797                         0xffffffff, 0x00000000 },
9798
9799                 /* Mailbox Registers */
9800                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9801                         0x00000000, 0x000001ff },
9802                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9803                         0x00000000, 0x000001ff },
9804                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9805                         0x00000000, 0x000007ff },
9806                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9807                         0x00000000, 0x000001ff },
9808
9809                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9810         };
9811
9812         is_5705 = is_5750 = 0;
9813         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9814                 is_5705 = 1;
9815                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9816                         is_5750 = 1;
9817         }
9818
9819         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9820                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9821                         continue;
9822
9823                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9824                         continue;
9825
9826                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9827                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9828                         continue;
9829
9830                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9831                         continue;
9832
9833                 offset = (u32) reg_tbl[i].offset;
9834                 read_mask = reg_tbl[i].read_mask;
9835                 write_mask = reg_tbl[i].write_mask;
9836
9837                 /* Save the original register content */
9838                 save_val = tr32(offset);
9839
9840                 /* Determine the read-only value. */
9841                 read_val = save_val & read_mask;
9842
9843                 /* Write zero to the register, then make sure the read-only bits
9844                  * are not changed and the read/write bits are all zeros.
9845                  */
9846                 tw32(offset, 0);
9847
9848                 val = tr32(offset);
9849
9850                 /* Test the read-only and read/write bits. */
9851                 if (((val & read_mask) != read_val) || (val & write_mask))
9852                         goto out;
9853
9854                 /* Write ones to all the bits defined by RdMask and WrMask, then
9855                  * make sure the read-only bits are not changed and the
9856                  * read/write bits are all ones.
9857                  */
9858                 tw32(offset, read_mask | write_mask);
9859
9860                 val = tr32(offset);
9861
9862                 /* Test the read-only bits. */
9863                 if ((val & read_mask) != read_val)
9864                         goto out;
9865
9866                 /* Test the read/write bits. */
9867                 if ((val & write_mask) != write_mask)
9868                         goto out;
9869
9870                 tw32(offset, save_val);
9871         }
9872
9873         return 0;
9874
9875 out:
9876         if (netif_msg_hw(tp))
9877                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9878                        offset);
9879         tw32(offset, save_val);
9880         return -EIO;
9881 }
9882
9883 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9884 {
9885         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9886         int i;
9887         u32 j;
9888
9889         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9890                 for (j = 0; j < len; j += 4) {
9891                         u32 val;
9892
9893                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9894                         tg3_read_mem(tp, offset + j, &val);
9895                         if (val != test_pattern[i])
9896                                 return -EIO;
9897                 }
9898         }
9899         return 0;
9900 }
9901
9902 static int tg3_test_memory(struct tg3 *tp)
9903 {
9904         static struct mem_entry {
9905                 u32 offset;
9906                 u32 len;
9907         } mem_tbl_570x[] = {
9908                 { 0x00000000, 0x00b50},
9909                 { 0x00002000, 0x1c000},
9910                 { 0xffffffff, 0x00000}
9911         }, mem_tbl_5705[] = {
9912                 { 0x00000100, 0x0000c},
9913                 { 0x00000200, 0x00008},
9914                 { 0x00004000, 0x00800},
9915                 { 0x00006000, 0x01000},
9916                 { 0x00008000, 0x02000},
9917                 { 0x00010000, 0x0e000},
9918                 { 0xffffffff, 0x00000}
9919         }, mem_tbl_5755[] = {
9920                 { 0x00000200, 0x00008},
9921                 { 0x00004000, 0x00800},
9922                 { 0x00006000, 0x00800},
9923                 { 0x00008000, 0x02000},
9924                 { 0x00010000, 0x0c000},
9925                 { 0xffffffff, 0x00000}
9926         }, mem_tbl_5906[] = {
9927                 { 0x00000200, 0x00008},
9928                 { 0x00004000, 0x00400},
9929                 { 0x00006000, 0x00400},
9930                 { 0x00008000, 0x01000},
9931                 { 0x00010000, 0x01000},
9932                 { 0xffffffff, 0x00000}
9933         };
9934         struct mem_entry *mem_tbl;
9935         int err = 0;
9936         int i;
9937
9938         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9939                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9940                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9941                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9942                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9943                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9944                         mem_tbl = mem_tbl_5755;
9945                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9946                         mem_tbl = mem_tbl_5906;
9947                 else
9948                         mem_tbl = mem_tbl_5705;
9949         } else
9950                 mem_tbl = mem_tbl_570x;
9951
9952         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9953                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9954                     mem_tbl[i].len)) != 0)
9955                         break;
9956         }
9957
9958         return err;
9959 }
9960
9961 #define TG3_MAC_LOOPBACK        0
9962 #define TG3_PHY_LOOPBACK        1
9963
9964 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9965 {
9966         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9967         u32 desc_idx;
9968         struct sk_buff *skb, *rx_skb;
9969         u8 *tx_data;
9970         dma_addr_t map;
9971         int num_pkts, tx_len, rx_len, i, err;
9972         struct tg3_rx_buffer_desc *desc;
9973
9974         if (loopback_mode == TG3_MAC_LOOPBACK) {
9975                 /* HW errata - mac loopback fails in some cases on 5780.
9976                  * Normal traffic and PHY loopback are not affected by
9977                  * errata.
9978                  */
9979                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9980                         return 0;
9981
9982                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9983                            MAC_MODE_PORT_INT_LPBACK;
9984                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9985                         mac_mode |= MAC_MODE_LINK_POLARITY;
9986                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9987                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9988                 else
9989                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9990                 tw32(MAC_MODE, mac_mode);
9991         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9992                 u32 val;
9993
9994                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9995                         u32 phytest;
9996
9997                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9998                                 u32 phy;
9999
10000                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10001                                              phytest | MII_TG3_EPHY_SHADOW_EN);
10002                                 if (!tg3_readphy(tp, 0x1b, &phy))
10003                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
10004                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10005                         }
10006                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10007                 } else
10008                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10009
10010                 tg3_phy_toggle_automdix(tp, 0);
10011
10012                 tg3_writephy(tp, MII_BMCR, val);
10013                 udelay(40);
10014
10015                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10017                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
10018                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10019                 } else
10020                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10021
10022                 /* reset to prevent losing 1st rx packet intermittently */
10023                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10024                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10025                         udelay(10);
10026                         tw32_f(MAC_RX_MODE, tp->rx_mode);
10027                 }
10028                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10029                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10030                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10031                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10032                                 mac_mode |= MAC_MODE_LINK_POLARITY;
10033                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
10034                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10035                 }
10036                 tw32(MAC_MODE, mac_mode);
10037         } else {
10038                 return -EINVAL;
10039         }
10040
10041         err = -EIO;
10042
10043         tx_len = 1514;
10044         skb = netdev_alloc_skb(tp->dev, tx_len);
10045         if (!skb)
10046                 return -ENOMEM;
10047
10048         tx_data = skb_put(skb, tx_len);
10049         memcpy(tx_data, tp->dev->dev_addr, 6);
10050         memset(tx_data + 6, 0x0, 8);
10051
10052         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10053
10054         for (i = 14; i < tx_len; i++)
10055                 tx_data[i] = (u8) (i & 0xff);
10056
10057         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10058
10059         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10060              HOSTCC_MODE_NOW);
10061
10062         udelay(10);
10063
10064         rx_start_idx = tp->hw_status->idx[0].rx_producer;
10065
10066         num_pkts = 0;
10067
10068         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10069
10070         tp->tx_prod++;
10071         num_pkts++;
10072
10073         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10074                      tp->tx_prod);
10075         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10076
10077         udelay(10);
10078
10079         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
10080         for (i = 0; i < 25; i++) {
10081                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10082                        HOSTCC_MODE_NOW);
10083
10084                 udelay(10);
10085
10086                 tx_idx = tp->hw_status->idx[0].tx_consumer;
10087                 rx_idx = tp->hw_status->idx[0].rx_producer;
10088                 if ((tx_idx == tp->tx_prod) &&
10089                     (rx_idx == (rx_start_idx + num_pkts)))
10090                         break;
10091         }
10092
10093         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10094         dev_kfree_skb(skb);
10095
10096         if (tx_idx != tp->tx_prod)
10097                 goto out;
10098
10099         if (rx_idx != rx_start_idx + num_pkts)
10100                 goto out;
10101
10102         desc = &tp->rx_rcb[rx_start_idx];
10103         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10104         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10105         if (opaque_key != RXD_OPAQUE_RING_STD)
10106                 goto out;
10107
10108         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10109             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10110                 goto out;
10111
10112         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10113         if (rx_len != tx_len)
10114                 goto out;
10115
10116         rx_skb = tp->rx_std_buffers[desc_idx].skb;
10117
10118         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10119         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10120
10121         for (i = 14; i < tx_len; i++) {
10122                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10123                         goto out;
10124         }
10125         err = 0;
10126
10127         /* tg3_free_rings will unmap and free the rx_skb */
10128 out:
10129         return err;
10130 }
10131
10132 #define TG3_MAC_LOOPBACK_FAILED         1
10133 #define TG3_PHY_LOOPBACK_FAILED         2
10134 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10135                                          TG3_PHY_LOOPBACK_FAILED)
10136
10137 static int tg3_test_loopback(struct tg3 *tp)
10138 {
10139         int err = 0;
10140         u32 cpmuctrl = 0;
10141
10142         if (!netif_running(tp->dev))
10143                 return TG3_LOOPBACK_FAILED;
10144
10145         err = tg3_reset_hw(tp, 1);
10146         if (err)
10147                 return TG3_LOOPBACK_FAILED;
10148
10149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10150             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10151             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10152                 int i;
10153                 u32 status;
10154
10155                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10156
10157                 /* Wait for up to 40 microseconds to acquire lock. */
10158                 for (i = 0; i < 4; i++) {
10159                         status = tr32(TG3_CPMU_MUTEX_GNT);
10160                         if (status == CPMU_MUTEX_GNT_DRIVER)
10161                                 break;
10162                         udelay(10);
10163                 }
10164
10165                 if (status != CPMU_MUTEX_GNT_DRIVER)
10166                         return TG3_LOOPBACK_FAILED;
10167
10168                 /* Turn off link-based power management. */
10169                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10170                 tw32(TG3_CPMU_CTRL,
10171                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10172                                   CPMU_CTRL_LINK_AWARE_MODE));
10173         }
10174
10175         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10176                 err |= TG3_MAC_LOOPBACK_FAILED;
10177
10178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10180             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10181                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10182
10183                 /* Release the mutex */
10184                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10185         }
10186
10187         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10188             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10189                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10190                         err |= TG3_PHY_LOOPBACK_FAILED;
10191         }
10192
10193         return err;
10194 }
10195
10196 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10197                           u64 *data)
10198 {
10199         struct tg3 *tp = netdev_priv(dev);
10200
10201         if (tp->link_config.phy_is_low_power)
10202                 tg3_set_power_state(tp, PCI_D0);
10203
10204         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10205
10206         if (tg3_test_nvram(tp) != 0) {
10207                 etest->flags |= ETH_TEST_FL_FAILED;
10208                 data[0] = 1;
10209         }
10210         if (tg3_test_link(tp) != 0) {
10211                 etest->flags |= ETH_TEST_FL_FAILED;
10212                 data[1] = 1;
10213         }
10214         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10215                 int err, err2 = 0, irq_sync = 0;
10216
10217                 if (netif_running(dev)) {
10218                         tg3_phy_stop(tp);
10219                         tg3_netif_stop(tp);
10220                         irq_sync = 1;
10221                 }
10222
10223                 tg3_full_lock(tp, irq_sync);
10224
10225                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10226                 err = tg3_nvram_lock(tp);
10227                 tg3_halt_cpu(tp, RX_CPU_BASE);
10228                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10229                         tg3_halt_cpu(tp, TX_CPU_BASE);
10230                 if (!err)
10231                         tg3_nvram_unlock(tp);
10232
10233                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10234                         tg3_phy_reset(tp);
10235
10236                 if (tg3_test_registers(tp) != 0) {
10237                         etest->flags |= ETH_TEST_FL_FAILED;
10238                         data[2] = 1;
10239                 }
10240                 if (tg3_test_memory(tp) != 0) {
10241                         etest->flags |= ETH_TEST_FL_FAILED;
10242                         data[3] = 1;
10243                 }
10244                 if ((data[4] = tg3_test_loopback(tp)) != 0)
10245                         etest->flags |= ETH_TEST_FL_FAILED;
10246
10247                 tg3_full_unlock(tp);
10248
10249                 if (tg3_test_interrupt(tp) != 0) {
10250                         etest->flags |= ETH_TEST_FL_FAILED;
10251                         data[5] = 1;
10252                 }
10253
10254                 tg3_full_lock(tp, 0);
10255
10256                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10257                 if (netif_running(dev)) {
10258                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10259                         err2 = tg3_restart_hw(tp, 1);
10260                         if (!err2)
10261                                 tg3_netif_start(tp);
10262                 }
10263
10264                 tg3_full_unlock(tp);
10265
10266                 if (irq_sync && !err2)
10267                         tg3_phy_start(tp);
10268         }
10269         if (tp->link_config.phy_is_low_power)
10270                 tg3_set_power_state(tp, PCI_D3hot);
10271
10272 }
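/*
 * Annotation (illustrative sketch, not part of the driver): the self test
 * is started with ETHTOOL_TEST, i.e. "ethtool -t eth0 offline" for the
 * full set or "ethtool -t eth0 online" for just the NVRAM and link checks.
 * The result slots filled in above are:
 *
 *      data[0]  NVRAM test          data[3]  memory test
 *      data[1]  link test           data[4]  loopback test (bitmask)
 *      data[2]  register test       data[5]  interrupt test
 *
 * A non-zero slot marks a failure; data[4] carries the
 * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED bits.
 */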
10273
10274 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10275 {
10276         struct mii_ioctl_data *data = if_mii(ifr);
10277         struct tg3 *tp = netdev_priv(dev);
10278         int err;
10279
10280         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10281                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10282                         return -EAGAIN;
10283                 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10284         }
10285
10286         switch (cmd) {
10287         case SIOCGMIIPHY:
10288                 data->phy_id = PHY_ADDR;
10289
10290                 /* fallthru */
10291         case SIOCGMIIREG: {
10292                 u32 mii_regval;
10293
10294                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10295                         break;                  /* We have no PHY */
10296
10297                 if (tp->link_config.phy_is_low_power)
10298                         return -EAGAIN;
10299
10300                 spin_lock_bh(&tp->lock);
10301                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10302                 spin_unlock_bh(&tp->lock);
10303
10304                 data->val_out = mii_regval;
10305
10306                 return err;
10307         }
10308
10309         case SIOCSMIIREG:
10310                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10311                         break;                  /* We have no PHY */
10312
10313                 if (!capable(CAP_NET_ADMIN))
10314                         return -EPERM;
10315
10316                 if (tp->link_config.phy_is_low_power)
10317                         return -EAGAIN;
10318
10319                 spin_lock_bh(&tp->lock);
10320                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10321                 spin_unlock_bh(&tp->lock);
10322
10323                 return err;
10324
10325         default:
10326                 /* do nothing */
10327                 break;
10328         }
10329         return -EOPNOTSUPP;
10330 }
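/*
 * Annotation (illustrative sketch, not part of the driver): the ioctl
 * handler above also serves the classic SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG requests used by tools such as mii-tool.  A minimal
 * register read, assuming "eth0" and <linux/mii.h>:
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii = (void *)&ifr.ifr_data;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);           (fills mii->phy_id)
 *      mii->reg_num = MII_BMSR;
 *      ioctl(fd, SIOCGMIIREG, &ifr);           (status lands in mii->val_out)
 *
 * The register ioctls return -EAGAIN while the PHY is in low-power mode,
 * fall through to -EOPNOTSUPP on SERDES devices, and SIOCSMIIREG requires
 * CAP_NET_ADMIN.
 */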
10331
10332 #if TG3_VLAN_TAG_USED
10333 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10334 {
10335         struct tg3 *tp = netdev_priv(dev);
10336
10337         if (netif_running(dev))
10338                 tg3_netif_stop(tp);
10339
10340         tg3_full_lock(tp, 0);
10341
10342         tp->vlgrp = grp;
10343
10344         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10345         __tg3_set_rx_mode(dev);
10346
10347         if (netif_running(dev))
10348                 tg3_netif_start(tp);
10349
10350         tg3_full_unlock(tp);
10351 }
10352 #endif
10353
10354 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10355 {
10356         struct tg3 *tp = netdev_priv(dev);
10357
10358         memcpy(ec, &tp->coal, sizeof(*ec));
10359         return 0;
10360 }
10361
10362 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10363 {
10364         struct tg3 *tp = netdev_priv(dev);
10365         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10366         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10367
10368         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10369                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10370                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10371                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10372                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10373         }
10374
10375         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10376             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10377             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10378             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10379             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10380             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10381             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10382             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10383             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10384             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10385                 return -EINVAL;
10386
10387         /* No rx interrupts will be generated if both are zero */
10388         if ((ec->rx_coalesce_usecs == 0) &&
10389             (ec->rx_max_coalesced_frames == 0))
10390                 return -EINVAL;
10391
10392         /* No tx interrupts will be generated if both are zero */
10393         if ((ec->tx_coalesce_usecs == 0) &&
10394             (ec->tx_max_coalesced_frames == 0))
10395                 return -EINVAL;
10396
10397         /* Only copy relevant parameters, ignore all others. */
10398         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10399         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10400         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10401         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10402         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10403         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10404         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10405         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10406         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10407
10408         if (netif_running(dev)) {
10409                 tg3_full_lock(tp, 0);
10410                 __tg3_set_coalesce(tp, &tp->coal);
10411                 tg3_full_unlock(tp);
10412         }
10413         return 0;
10414 }
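/*
 * Annotation (illustrative sketch, not part of the driver): interrupt
 * coalescing is tuned with ETHTOOL_SCOALESCE, e.g.
 * "ethtool -C eth0 rx-usecs 50 rx-frames 32".  Reading the current values
 * first keeps the other relevant fields (tx, *_irq, stats ticks) intact:
 *
 *      struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
 *      ifr.ifr_data = (void *)&ec;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *      ec.cmd = ETHTOOL_SCOALESCE;
 *      ec.rx_coalesce_usecs = 50;
 *      ec.rx_max_coalesced_frames = 32;
 *      ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * tg3_set_coalesce() rejects out-of-range values with -EINVAL rather than
 * clamping them, and refuses a configuration in which both the usec and
 * frame thresholds of a direction are zero, since that would suppress
 * interrupts for that direction entirely.
 */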
10415
10416 static const struct ethtool_ops tg3_ethtool_ops = {
10417         .get_settings           = tg3_get_settings,
10418         .set_settings           = tg3_set_settings,
10419         .get_drvinfo            = tg3_get_drvinfo,
10420         .get_regs_len           = tg3_get_regs_len,
10421         .get_regs               = tg3_get_regs,
10422         .get_wol                = tg3_get_wol,
10423         .set_wol                = tg3_set_wol,
10424         .get_msglevel           = tg3_get_msglevel,
10425         .set_msglevel           = tg3_set_msglevel,
10426         .nway_reset             = tg3_nway_reset,
10427         .get_link               = ethtool_op_get_link,
10428         .get_eeprom_len         = tg3_get_eeprom_len,
10429         .get_eeprom             = tg3_get_eeprom,
10430         .set_eeprom             = tg3_set_eeprom,
10431         .get_ringparam          = tg3_get_ringparam,
10432         .set_ringparam          = tg3_set_ringparam,
10433         .get_pauseparam         = tg3_get_pauseparam,
10434         .set_pauseparam         = tg3_set_pauseparam,
10435         .get_rx_csum            = tg3_get_rx_csum,
10436         .set_rx_csum            = tg3_set_rx_csum,
10437         .set_tx_csum            = tg3_set_tx_csum,
10438         .set_sg                 = ethtool_op_set_sg,
10439         .set_tso                = tg3_set_tso,
10440         .self_test              = tg3_self_test,
10441         .get_strings            = tg3_get_strings,
10442         .phys_id                = tg3_phys_id,
10443         .get_ethtool_stats      = tg3_get_ethtool_stats,
10444         .get_coalesce           = tg3_get_coalesce,
10445         .set_coalesce           = tg3_set_coalesce,
10446         .get_sset_count         = tg3_get_sset_count,
10447 };
10448
10449 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10450 {
10451         u32 cursize, val, magic;
10452
10453         tp->nvram_size = EEPROM_CHIP_SIZE;
10454
10455         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10456                 return;
10457
10458         if ((magic != TG3_EEPROM_MAGIC) &&
10459             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10460             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10461                 return;
10462
10463         /*
10464          * Size the chip by reading offsets at increasing powers of two.
10465          * When we encounter our validation signature, we know the addressing
10466          * has wrapped around, and thus have our chip size.
10467          */
10468         cursize = 0x10;
10469
10470         while (cursize < tp->nvram_size) {
10471                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10472                         return;
10473
10474                 if (val == magic)
10475                         break;
10476
10477                 cursize <<= 1;
10478         }
10479
10480         tp->nvram_size = cursize;
10481 }
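
/* Worked example of the wrap-around sizing above (illustrative only, not
 * driver code): on a 16 KB part the address lines wrap modulo the device
 * size, so the read at offset 0x4000 aliases offset 0 and returns the magic
 * signature again:
 *
 *	cursize: 0x10, 0x20, 0x40, ..., 0x2000, 0x4000 -> val == magic
 *	tp->nvram_size = 0x4000 (16 KB)
 *
 * This assumes EEPROM_CHIP_SIZE (the scan limit) is larger than the part;
 * otherwise nvram_size simply keeps that default.
 */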
10482
10483 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10484 {
10485         u32 val;
10486
10487         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10488                 return;
10489
10490         /* Selfboot format */
10491         if (val != TG3_EEPROM_MAGIC) {
10492                 tg3_get_eeprom_size(tp);
10493                 return;
10494         }
10495
10496         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10497                 if (val != 0) {
10498                         tp->nvram_size = (val >> 16) * 1024;
10499                         return;
10500                 }
10501         }
10502         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10503 }
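
/* Illustrative note (not driver code): for parts carrying the standard
 * TG3_EEPROM_MAGIC signature, the word read at NVRAM offset 0xf0 is
 * interpreted as the device size in KB in its upper 16 bits.  For example,
 * reading 0x00800000 there gives (0x0080) * 1024 = 128 KB; reading 0 falls
 * back to the 512 KB default above.
 */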
10504
10505 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10506 {
10507         u32 nvcfg1;
10508
10509         nvcfg1 = tr32(NVRAM_CFG1);
10510         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10511                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10512         } else {
10514                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10515                 tw32(NVRAM_CFG1, nvcfg1);
10516         }
10517
10518         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10519             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10520                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10521                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10522                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10523                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10524                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10525                                 break;
10526                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10527                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10528                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10529                                 break;
10530                         case FLASH_VENDOR_ATMEL_EEPROM:
10531                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10532                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10533                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10534                                 break;
10535                         case FLASH_VENDOR_ST:
10536                                 tp->nvram_jedecnum = JEDEC_ST;
10537                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10538                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10539                                 break;
10540                         case FLASH_VENDOR_SAIFUN:
10541                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10542                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10543                                 break;
10544                         case FLASH_VENDOR_SST_SMALL:
10545                         case FLASH_VENDOR_SST_LARGE:
10546                                 tp->nvram_jedecnum = JEDEC_SST;
10547                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10548                                 break;
10549                 }
10550         } else {
10552                 tp->nvram_jedecnum = JEDEC_ATMEL;
10553                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10554                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10555         }
10556 }
10557
10558 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10559 {
10560         u32 nvcfg1;
10561
10562         nvcfg1 = tr32(NVRAM_CFG1);
10563
10564         /* NVRAM protection for TPM */
10565         if (nvcfg1 & (1 << 27))
10566                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10567
10568         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10569                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10570                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10571                         tp->nvram_jedecnum = JEDEC_ATMEL;
10572                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10573                         break;
10574                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10575                         tp->nvram_jedecnum = JEDEC_ATMEL;
10576                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10577                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10578                         break;
10579                 case FLASH_5752VENDOR_ST_M45PE10:
10580                 case FLASH_5752VENDOR_ST_M45PE20:
10581                 case FLASH_5752VENDOR_ST_M45PE40:
10582                         tp->nvram_jedecnum = JEDEC_ST;
10583                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10584                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10585                         break;
10586         }
10587
10588         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10589                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10590                         case FLASH_5752PAGE_SIZE_256:
10591                                 tp->nvram_pagesize = 256;
10592                                 break;
10593                         case FLASH_5752PAGE_SIZE_512:
10594                                 tp->nvram_pagesize = 512;
10595                                 break;
10596                         case FLASH_5752PAGE_SIZE_1K:
10597                                 tp->nvram_pagesize = 1024;
10598                                 break;
10599                         case FLASH_5752PAGE_SIZE_2K:
10600                                 tp->nvram_pagesize = 2048;
10601                                 break;
10602                         case FLASH_5752PAGE_SIZE_4K:
10603                                 tp->nvram_pagesize = 4096;
10604                                 break;
10605                         case FLASH_5752PAGE_SIZE_264:
10606                                 tp->nvram_pagesize = 264;
10607                                 break;
10608                 }
10609         } else {
10611                 /* For eeprom, set pagesize to maximum eeprom size */
10612                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10613
10614                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10615                 tw32(NVRAM_CFG1, nvcfg1);
10616         }
10617 }
10618
10619 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10620 {
10621         u32 nvcfg1, protect = 0;
10622
10623         nvcfg1 = tr32(NVRAM_CFG1);
10624
10625         /* NVRAM protection for TPM */
10626         if (nvcfg1 & (1 << 27)) {
10627                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10628                 protect = 1;
10629         }
10630
10631         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10632         switch (nvcfg1) {
10633                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10634                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10635                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10636                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10637                         tp->nvram_jedecnum = JEDEC_ATMEL;
10638                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10639                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10640                         tp->nvram_pagesize = 264;
10641                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10642                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10643                                 tp->nvram_size = (protect ? 0x3e200 :
10644                                                   TG3_NVRAM_SIZE_512KB);
10645                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10646                                 tp->nvram_size = (protect ? 0x1f200 :
10647                                                   TG3_NVRAM_SIZE_256KB);
10648                         else
10649                                 tp->nvram_size = (protect ? 0x1f200 :
10650                                                   TG3_NVRAM_SIZE_128KB);
10651                         break;
10652                 case FLASH_5752VENDOR_ST_M45PE10:
10653                 case FLASH_5752VENDOR_ST_M45PE20:
10654                 case FLASH_5752VENDOR_ST_M45PE40:
10655                         tp->nvram_jedecnum = JEDEC_ST;
10656                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10657                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10658                         tp->nvram_pagesize = 256;
10659                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10660                                 tp->nvram_size = (protect ?
10661                                                   TG3_NVRAM_SIZE_64KB :
10662                                                   TG3_NVRAM_SIZE_128KB);
10663                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10664                                 tp->nvram_size = (protect ?
10665                                                   TG3_NVRAM_SIZE_64KB :
10666                                                   TG3_NVRAM_SIZE_256KB);
10667                         else
10668                                 tp->nvram_size = (protect ?
10669                                                   TG3_NVRAM_SIZE_128KB :
10670                                                   TG3_NVRAM_SIZE_512KB);
10671                         break;
10672         }
10673 }
10674
10675 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10676 {
10677         u32 nvcfg1;
10678
10679         nvcfg1 = tr32(NVRAM_CFG1);
10680
10681         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10682                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10683                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10684                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10685                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10686                         tp->nvram_jedecnum = JEDEC_ATMEL;
10687                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10688                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10689
10690                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10691                         tw32(NVRAM_CFG1, nvcfg1);
10692                         break;
10693                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10694                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10695                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10696                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10697                         tp->nvram_jedecnum = JEDEC_ATMEL;
10698                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10699                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10700                         tp->nvram_pagesize = 264;
10701                         break;
10702                 case FLASH_5752VENDOR_ST_M45PE10:
10703                 case FLASH_5752VENDOR_ST_M45PE20:
10704                 case FLASH_5752VENDOR_ST_M45PE40:
10705                         tp->nvram_jedecnum = JEDEC_ST;
10706                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10707                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10708                         tp->nvram_pagesize = 256;
10709                         break;
10710         }
10711 }
10712
10713 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10714 {
10715         u32 nvcfg1, protect = 0;
10716
10717         nvcfg1 = tr32(NVRAM_CFG1);
10718
10719         /* NVRAM protection for TPM */
10720         if (nvcfg1 & (1 << 27)) {
10721                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10722                 protect = 1;
10723         }
10724
10725         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10726         switch (nvcfg1) {
10727                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10728                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10729                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10730                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10731                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10732                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10733                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10734                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10735                         tp->nvram_jedecnum = JEDEC_ATMEL;
10736                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10737                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10738                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10739                         tp->nvram_pagesize = 256;
10740                         break;
10741                 case FLASH_5761VENDOR_ST_A_M45PE20:
10742                 case FLASH_5761VENDOR_ST_A_M45PE40:
10743                 case FLASH_5761VENDOR_ST_A_M45PE80:
10744                 case FLASH_5761VENDOR_ST_A_M45PE16:
10745                 case FLASH_5761VENDOR_ST_M_M45PE20:
10746                 case FLASH_5761VENDOR_ST_M_M45PE40:
10747                 case FLASH_5761VENDOR_ST_M_M45PE80:
10748                 case FLASH_5761VENDOR_ST_M_M45PE16:
10749                         tp->nvram_jedecnum = JEDEC_ST;
10750                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10751                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10752                         tp->nvram_pagesize = 256;
10753                         break;
10754         }
10755
10756         if (protect) {
10757                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10758         } else {
10759                 switch (nvcfg1) {
10760                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10761                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10762                         case FLASH_5761VENDOR_ST_A_M45PE16:
10763                         case FLASH_5761VENDOR_ST_M_M45PE16:
10764                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10765                                 break;
10766                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10767                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10768                         case FLASH_5761VENDOR_ST_A_M45PE80:
10769                         case FLASH_5761VENDOR_ST_M_M45PE80:
10770                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10771                                 break;
10772                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10773                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10774                         case FLASH_5761VENDOR_ST_A_M45PE40:
10775                         case FLASH_5761VENDOR_ST_M_M45PE40:
10776                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10777                                 break;
10778                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10779                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10780                         case FLASH_5761VENDOR_ST_A_M45PE20:
10781                         case FLASH_5761VENDOR_ST_M_M45PE20:
10782                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10783                                 break;
10784                 }
10785         }
10786 }
10787
10788 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10789 {
10790         tp->nvram_jedecnum = JEDEC_ATMEL;
10791         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10792         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10793 }
10794
10795 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10796 static void __devinit tg3_nvram_init(struct tg3 *tp)
10797 {
10798         tw32_f(GRC_EEPROM_ADDR,
10799              (EEPROM_ADDR_FSM_RESET |
10800               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10801                EEPROM_ADDR_CLKPERD_SHIFT)));
10802
10803         msleep(1);
10804
10805         /* Enable seeprom accesses. */
10806         tw32_f(GRC_LOCAL_CTRL,
10807              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10808         udelay(100);
10809
10810         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10811             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10812                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10813
10814                 if (tg3_nvram_lock(tp)) {
10815                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10816                                "tg3_nvram_init failed.\n", tp->dev->name);
10817                         return;
10818                 }
10819                 tg3_enable_nvram_access(tp);
10820
10821                 tp->nvram_size = 0;
10822
10823                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10824                         tg3_get_5752_nvram_info(tp);
10825                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10826                         tg3_get_5755_nvram_info(tp);
10827                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10828                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10829                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10830                         tg3_get_5787_nvram_info(tp);
10831                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10832                         tg3_get_5761_nvram_info(tp);
10833                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10834                         tg3_get_5906_nvram_info(tp);
10835                 else
10836                         tg3_get_nvram_info(tp);
10837
10838                 if (tp->nvram_size == 0)
10839                         tg3_get_nvram_size(tp);
10840
10841                 tg3_disable_nvram_access(tp);
10842                 tg3_nvram_unlock(tp);
10843
10844         } else {
10845                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10846
10847                 tg3_get_eeprom_size(tp);
10848         }
10849 }
10850
10851 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10852                                         u32 offset, u32 *val)
10853 {
10854         u32 tmp;
10855         int i;
10856
10857         if (offset > EEPROM_ADDR_ADDR_MASK ||
10858             (offset % 4) != 0)
10859                 return -EINVAL;
10860
10861         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10862                                         EEPROM_ADDR_DEVID_MASK |
10863                                         EEPROM_ADDR_READ);
10864         tw32(GRC_EEPROM_ADDR,
10865              tmp |
10866              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10867              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10868               EEPROM_ADDR_ADDR_MASK) |
10869              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10870
10871         for (i = 0; i < 1000; i++) {
10872                 tmp = tr32(GRC_EEPROM_ADDR);
10873
10874                 if (tmp & EEPROM_ADDR_COMPLETE)
10875                         break;
10876                 msleep(1);
10877         }
10878         if (!(tmp & EEPROM_ADDR_COMPLETE))
10879                 return -EBUSY;
10880
10881         *val = tr32(GRC_EEPROM_DATA);
10882         return 0;
10883 }
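
/* Note on timing (descriptive only): each word read through the legacy GRC
 * EEPROM interface above is polled for EEPROM_ADDR_COMPLETE in 1 ms steps,
 * so a single word may take up to roughly 1000 * 1 ms = 1 s before -EBUSY
 * is returned.
 */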
10884
10885 #define NVRAM_CMD_TIMEOUT 10000
10886
10887 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10888 {
10889         int i;
10890
10891         tw32(NVRAM_CMD, nvram_cmd);
10892         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10893                 udelay(10);
10894                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10895                         udelay(10);
10896                         break;
10897                 }
10898         }
10899         if (i == NVRAM_CMD_TIMEOUT)
10900                 return -EBUSY;
10902         return 0;
10903 }
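
/* Note on timing (descriptive only): the polling loop above gives each NVRAM
 * command up to NVRAM_CMD_TIMEOUT * 10 us = 10000 * 10 us = 100 ms to raise
 * NVRAM_CMD_DONE before the caller sees -EBUSY.
 */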
10904
10905 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10906 {
10907         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10908             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10909             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10910            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10911             (tp->nvram_jedecnum == JEDEC_ATMEL))
10912
10913                 addr = ((addr / tp->nvram_pagesize) <<
10914                         ATMEL_AT45DB0X1B_PAGE_POS) +
10915                        (addr % tp->nvram_pagesize);
10916
10917         return addr;
10918 }
10919
10920 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10921 {
10922         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10923             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10924             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10925            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10926             (tp->nvram_jedecnum == JEDEC_ATMEL))
10927
10928                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10929                         tp->nvram_pagesize) +
10930                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10931
10932         return addr;
10933 }
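
/* Worked example of the two translations above (illustrative only).  Buffered
 * Atmel AT45DB0x1B parts use 264-byte pages addressed as page << PAGE_POS
 * plus a byte offset within the page; assuming ATMEL_AT45DB0X1B_PAGE_POS
 * is 9, as defined in tg3.h:
 *
 *	linear address 540 = 2 * 264 + 12
 *	tg3_nvram_phys_addr:    ((540 / 264) << 9) + (540 % 264) = 0x40c
 *	tg3_nvram_logical_addr: ((0x40c >> 9) * 264) + (0x40c & 0x1ff) = 540
 *
 * i.e. the two helpers are inverses of each other for this flash type.
 */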
10934
10935 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10936 {
10937         int ret;
10938
10939         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10940                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10941
10942         offset = tg3_nvram_phys_addr(tp, offset);
10943
10944         if (offset > NVRAM_ADDR_MSK)
10945                 return -EINVAL;
10946
10947         ret = tg3_nvram_lock(tp);
10948         if (ret)
10949                 return ret;
10950
10951         tg3_enable_nvram_access(tp);
10952
10953         tw32(NVRAM_ADDR, offset);
10954         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10955                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10956
10957         if (ret == 0)
10958                 *val = swab32(tr32(NVRAM_RDDATA));
10959
10960         tg3_disable_nvram_access(tp);
10961
10962         tg3_nvram_unlock(tp);
10963
10964         return ret;
10965 }
10966
10967 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10968 {
10969         u32 v;
10970         int res = tg3_nvram_read(tp, offset, &v);
10971         if (!res)
10972                 *val = cpu_to_le32(v);
10973         return res;
10974 }
10975
10976 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10977 {
10978         int err;
10979         u32 tmp;
10980
10981         err = tg3_nvram_read(tp, offset, &tmp);
10982         *val = swab32(tmp);
10983         return err;
10984 }
10985
10986 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10987                                     u32 offset, u32 len, u8 *buf)
10988 {
10989         int i, j, rc = 0;
10990         u32 val;
10991
10992         for (i = 0; i < len; i += 4) {
10993                 u32 addr;
10994                 __le32 data;
10995
10996                 addr = offset + i;
10997
10998                 memcpy(&data, buf + i, 4);
10999
11000                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
11001
11002                 val = tr32(GRC_EEPROM_ADDR);
11003                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11004
11005                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11006                         EEPROM_ADDR_READ);
11007                 tw32(GRC_EEPROM_ADDR, val |
11008                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
11009                         (addr & EEPROM_ADDR_ADDR_MASK) |
11010                         EEPROM_ADDR_START |
11011                         EEPROM_ADDR_WRITE);
11012
11013                 for (j = 0; j < 1000; j++) {
11014                         val = tr32(GRC_EEPROM_ADDR);
11015
11016                         if (val & EEPROM_ADDR_COMPLETE)
11017                                 break;
11018                         msleep(1);
11019                 }
11020                 if (!(val & EEPROM_ADDR_COMPLETE)) {
11021                         rc = -EBUSY;
11022                         break;
11023                 }
11024         }
11025
11026         return rc;
11027 }
11028
11029 /* offset and length are dword aligned */
11030 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11031                 u8 *buf)
11032 {
11033         int ret = 0;
11034         u32 pagesize = tp->nvram_pagesize;
11035         u32 pagemask = pagesize - 1;
11036         u32 nvram_cmd;
11037         u8 *tmp;
11038
11039         tmp = kmalloc(pagesize, GFP_KERNEL);
11040         if (tmp == NULL)
11041                 return -ENOMEM;
11042
11043         while (len) {
11044                 int j;
11045                 u32 phy_addr, page_off, size;
11046
11047                 phy_addr = offset & ~pagemask;
11048
11049                 for (j = 0; j < pagesize; j += 4) {
11050                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11051                                                 (__le32 *) (tmp + j))))
11052                                 break;
11053                 }
11054                 if (ret)
11055                         break;
11056
11057                 page_off = offset & pagemask;
11058                 size = pagesize;
11059                 if (len < size)
11060                         size = len;
11061
11062                 len -= size;
11063
11064                 memcpy(tmp + page_off, buf, size);
11065
11066                 offset = offset + (pagesize - page_off);
11067
11068                 tg3_enable_nvram_access(tp);
11069
11070                 /*
11071                  * Before we can erase the flash page, we need
11072                  * to issue a special "write enable" command.
11073                  */
11074                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11075
11076                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11077                         break;
11078
11079                 /* Erase the target page */
11080                 tw32(NVRAM_ADDR, phy_addr);
11081
11082                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11083                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11084
11085                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11086                         break;
11087
11088                 /* Issue another write enable to start the write. */
11089                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11090
11091                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11092                         break;
11093
11094                 for (j = 0; j < pagesize; j += 4) {
11095                         __be32 data;
11096
11097                         data = *((__be32 *) (tmp + j));
11098                         /* swab32(le32_to_cpu(data)), actually */
11099                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11100
11101                         tw32(NVRAM_ADDR, phy_addr + j);
11102
11103                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11104                                 NVRAM_CMD_WR;
11105
11106                         if (j == 0)
11107                                 nvram_cmd |= NVRAM_CMD_FIRST;
11108                         else if (j == (pagesize - 4))
11109                                 nvram_cmd |= NVRAM_CMD_LAST;
11110
11111                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11112                                 break;
11113                 }
11114                 if (ret)
11115                         break;
11116         }
11117
11118         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11119         tg3_nvram_exec_cmd(tp, nvram_cmd);
11120
11121         kfree(tmp);
11122
11123         return ret;
11124 }
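
/* Illustrative sketch of the page math above (not driver code), assuming a
 * 256-byte unbuffered flash page: writing 8 bytes at offset 0x10a gives
 *
 *	phy_addr = 0x10a & ~0xff = 0x100   (page that is read back and erased)
 *	page_off = 0x10a &  0xff = 0x0a    (where the new bytes land in tmp[])
 *
 * so the whole page is read into tmp[], patched at tmp[0x0a..0x11], erased,
 * and then reprogrammed four bytes at a time with FIRST/LAST framing.
 */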
11125
11126 /* offset and length are dword aligned */
11127 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11128                 u8 *buf)
11129 {
11130         int i, ret = 0;
11131
11132         for (i = 0; i < len; i += 4, offset += 4) {
11133                 u32 page_off, phy_addr, nvram_cmd;
11134                 __be32 data;
11135
11136                 memcpy(&data, buf + i, 4);
11137                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11138
11139                 page_off = offset % tp->nvram_pagesize;
11140
11141                 phy_addr = tg3_nvram_phys_addr(tp, offset);
11142
11143                 tw32(NVRAM_ADDR, phy_addr);
11144
11145                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11146
11147                 if ((page_off == 0) || (i == 0))
11148                         nvram_cmd |= NVRAM_CMD_FIRST;
11149                 if (page_off == (tp->nvram_pagesize - 4))
11150                         nvram_cmd |= NVRAM_CMD_LAST;
11151
11152                 if (i == (len - 4))
11153                         nvram_cmd |= NVRAM_CMD_LAST;
11154
11155                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11156                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11157                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11158                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11159                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11160                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11161                     (tp->nvram_jedecnum == JEDEC_ST) &&
11162                     (nvram_cmd & NVRAM_CMD_FIRST)) {
11163
11164                         if ((ret = tg3_nvram_exec_cmd(tp,
11165                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11166                                 NVRAM_CMD_DONE)))
11167
11168                                 break;
11169                 }
11170                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11171                         /* We always do complete word writes to eeprom. */
11172                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11173                 }
11174
11175                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11176                         break;
11177         }
11178         return ret;
11179 }
11180
11181 /* offset and length are dword aligned */
11182 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11183 {
11184         int ret;
11185
11186         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11187                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11188                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11189                 udelay(40);
11190         }
11191
11192         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11193                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11194         } else {
11196                 u32 grc_mode;
11197
11198                 ret = tg3_nvram_lock(tp);
11199                 if (ret)
11200                         return ret;
11201
11202                 tg3_enable_nvram_access(tp);
11203                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11204                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11205                         tw32(NVRAM_WRITE1, 0x406);
11206
11207                 grc_mode = tr32(GRC_MODE);
11208                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11209
11210                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11211                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11212
11213                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11214                                 buf);
11215                 } else {
11217                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11218                                 buf);
11219                 }
11220
11221                 grc_mode = tr32(GRC_MODE);
11222                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11223
11224                 tg3_disable_nvram_access(tp);
11225                 tg3_nvram_unlock(tp);
11226         }
11227
11228         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11229                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11230                 udelay(40);
11231         }
11232
11233         return ret;
11234 }
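
/* Summary of the dispatch above (descriptive only): devices without
 * TG3_FLAG_NVRAM use the legacy GRC EEPROM interface; buffered flash and
 * plain EEPROM parts take the word-at-a-time buffered path; only unbuffered
 * flash needs the read/erase/rewrite-a-page path.
 */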
11235
11236 struct subsys_tbl_ent {
11237         u16 subsys_vendor, subsys_devid;
11238         u32 phy_id;
11239 };
11240
11241 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11242         /* Broadcom boards. */
11243         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11244         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11245         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11246         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
11247         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11248         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11249         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
11250         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11251         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11252         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11253         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11254
11255         /* 3com boards. */
11256         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11257         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11258         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
11259         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11260         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11261
11262         /* DELL boards. */
11263         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11264         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11265         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11266         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11267
11268         /* Compaq boards. */
11269         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11270         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11271         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
11272         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11273         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11274
11275         /* IBM boards. */
11276         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11277 };
11278
11279 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11280 {
11281         int i;
11282
11283         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11284                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11285                      tp->pdev->subsystem_vendor) &&
11286                     (subsys_id_to_phy_id[i].subsys_devid ==
11287                      tp->pdev->subsystem_device))
11288                         return &subsys_id_to_phy_id[i];
11289         }
11290         return NULL;
11291 }
11292
11293 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11294 {
11295         u32 val;
11296         u16 pmcsr;
11297
11298         /* On some early chips the SRAM cannot be accessed in D3hot state,
11299          * so we need to make sure we're in D0.
11300          */
11301         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11302         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11303         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11304         msleep(1);
11305
11306         /* Make sure register accesses (indirect or otherwise)
11307          * will function correctly.
11308          */
11309         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11310                                tp->misc_host_ctrl);
11311
11312         /* The memory arbiter has to be enabled in order for SRAM accesses
11313          * to succeed.  Normally on powerup the tg3 chip firmware will make
11314          * sure it is enabled, but other entities such as system netboot
11315          * code might disable it.
11316          */
11317         val = tr32(MEMARB_MODE);
11318         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11319
11320         tp->phy_id = PHY_ID_INVALID;
11321         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11322
11323         /* Assume an onboard device and WOL capable by default.  */
11324         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11325
11326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11327                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11328                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11329                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11330                 }
11331                 val = tr32(VCPU_CFGSHDW);
11332                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11333                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11334                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11335                     (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11336                     device_may_wakeup(&tp->pdev->dev))
11337                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11338                 goto done;
11339         }
11340
11341         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11342         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11343                 u32 nic_cfg, led_cfg;
11344                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11345                 int eeprom_phy_serdes = 0;
11346
11347                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11348                 tp->nic_sram_data_cfg = nic_cfg;
11349
11350                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11351                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11352                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11353                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11354                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11355                     (ver > 0) && (ver < 0x100))
11356                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11357
11358                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11359                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11360
11361                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11362                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11363                         eeprom_phy_serdes = 1;
11364
11365                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11366                 if (nic_phy_id != 0) {
11367                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11368                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11369
11370                         eeprom_phy_id  = (id1 >> 16) << 10;
11371                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
11372                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
11373                 } else
11374                         eeprom_phy_id = 0;
11375
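                /* The driver packs the two MII ID words into its internal
                 * phy_id as (ID1 & 0xffff) << 10 | (ID2 & 0xfc00) << 16 |
                 * (ID2 & 0x03ff); here they are recovered from the 32-bit
                 * copy the bootcode left in SRAM (ID1 in the upper half).
                 * Hypothetical example: ID1 = 0x0020 and ID2 = 0x60b0 would
                 * give phy_id = 0x600080b0.
                 */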
11376                 tp->phy_id = eeprom_phy_id;
11377                 if (eeprom_phy_serdes) {
11378                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11379                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11380                         else
11381                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11382                 }
11383
11384                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11385                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11386                                     SHASTA_EXT_LED_MODE_MASK);
11387                 else
11388                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11389
11390                 switch (led_cfg) {
11391                 default:
11392                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11393                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11394                         break;
11395
11396                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11397                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11398                         break;
11399
11400                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11401                         tp->led_ctrl = LED_CTRL_MODE_MAC;
11402
11403                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11404                          * read on some older 5700/5701 bootcode.
11405                          */
11406                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11407                             ASIC_REV_5700 ||
11408                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
11409                             ASIC_REV_5701)
11410                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11411
11412                         break;
11413
11414                 case SHASTA_EXT_LED_SHARED:
11415                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
11416                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11417                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11418                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11419                                                  LED_CTRL_MODE_PHY_2);
11420                         break;
11421
11422                 case SHASTA_EXT_LED_MAC:
11423                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11424                         break;
11425
11426                 case SHASTA_EXT_LED_COMBO:
11427                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
11428                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11429                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11430                                                  LED_CTRL_MODE_PHY_2);
11431                         break;
11432
11433                 }
11434
11435                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11436                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11437                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11438                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11439
11440                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11441                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11442
11443                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11444                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11445                         if ((tp->pdev->subsystem_vendor ==
11446                              PCI_VENDOR_ID_ARIMA) &&
11447                             (tp->pdev->subsystem_device == 0x205a ||
11448                              tp->pdev->subsystem_device == 0x2063))
11449                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11450                 } else {
11451                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11452                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11453                 }
11454
11455                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11456                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11457                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11458                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11459                 }
11460
11461                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11462                         (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11463                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11464
11465                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11466                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11467                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11468
11469                 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11470                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11471                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11472
11473                 if (cfg2 & (1 << 17))
11474                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11475
11476                 /* SerDes signal pre-emphasis in register 0x590 is set
11477                  * by the bootcode if bit 18 is set. */
11478                 if (cfg2 & (1 << 18))
11479                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11480
11481                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11482                         u32 cfg3;
11483
11484                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11485                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11486                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11487                 }
11488
11489                 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11490                         tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11491                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11492                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11493                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11494                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11495         }
11496 done:
11497         device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11498         device_set_wakeup_enable(&tp->pdev->dev,
11499                                  tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11500 }
11501
11502 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11503 {
11504         int i;
11505         u32 val;
11506
11507         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11508         tw32(OTP_CTRL, cmd);
11509
11510         /* Wait for up to 1 ms for command to execute. */
11511         for (i = 0; i < 100; i++) {
11512                 val = tr32(OTP_STATUS);
11513                 if (val & OTP_STATUS_CMD_DONE)
11514                         break;
11515                 udelay(10);
11516         }
11517
11518         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11519 }
11520
11521 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11522  * configuration is a 32-bit value that straddles the alignment boundary.
11523  * We do two 32-bit reads and then shift and merge the results.
11524  */
11525 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11526 {
11527         u32 bhalf_otp, thalf_otp;
11528
11529         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11530
11531         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11532                 return 0;
11533
11534         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11535
11536         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11537                 return 0;
11538
11539         thalf_otp = tr32(OTP_READ_DATA);
11540
11541         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11542
11543         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11544                 return 0;
11545
11546         bhalf_otp = tr32(OTP_READ_DATA);
11547
11548         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11549 }
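
/* Worked example of the merge above (illustrative values only): with
 * thalf_otp = 0xaaaa1234 and bhalf_otp = 0x5678bbbb the function returns
 * ((0xaaaa1234 & 0x0000ffff) << 16) | (0x5678bbbb >> 16) = 0x12345678,
 * i.e. the low half of the first read supplies the high half of the result.
 */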
11550
11551 static int __devinit tg3_phy_probe(struct tg3 *tp)
11552 {
11553         u32 hw_phy_id_1, hw_phy_id_2;
11554         u32 hw_phy_id, hw_phy_id_masked;
11555         int err;
11556
11557         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11558                 return tg3_phy_init(tp);
11559
11560         /* Reading the PHY ID register can conflict with ASF
11561          * firmware access to the PHY hardware.
11562          */
11563         err = 0;
11564         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11565             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11566                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11567         } else {
11568                 /* Now read the physical PHY_ID from the chip and verify
11569                  * that it is sane.  If it doesn't look good, we fall back
11570                  * to the PHY_ID found in the eeprom area and, failing
11571                  * that, the hard-coded subsystem ID table.
11572                  */
11573                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11574                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11575
11576                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11577                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11578                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
11579
11580                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11581         }
11582
11583         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11584                 tp->phy_id = hw_phy_id;
11585                 if (hw_phy_id_masked == PHY_ID_BCM8002)
11586                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11587                 else
11588                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11589         } else {
11590                 if (tp->phy_id != PHY_ID_INVALID) {
11591                         /* Do nothing, phy ID already set up in
11592                          * tg3_get_eeprom_hw_cfg().
11593                          */
11594                 } else {
11595                         struct subsys_tbl_ent *p;
11596
11597                         /* No eeprom signature?  Try the hardcoded
11598                          * subsys device table.
11599                          */
11600                         p = lookup_by_subsys(tp);
11601                         if (!p)
11602                                 return -ENODEV;
11603
11604                         tp->phy_id = p->phy_id;
11605                         if (!tp->phy_id ||
11606                             tp->phy_id == PHY_ID_BCM8002)
11607                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11608                 }
11609         }
11610
11611         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11612             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11613             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11614                 u32 bmsr, adv_reg, tg3_ctrl, mask;
11615
11616                 tg3_readphy(tp, MII_BMSR, &bmsr);
11617                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11618                     (bmsr & BMSR_LSTATUS))
11619                         goto skip_phy_reset;
11620
11621                 err = tg3_phy_reset(tp);
11622                 if (err)
11623                         return err;
11624
11625                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11626                            ADVERTISE_100HALF | ADVERTISE_100FULL |
11627                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11628                 tg3_ctrl = 0;
11629                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11630                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11631                                     MII_TG3_CTRL_ADV_1000_FULL);
11632                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11633                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11634                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11635                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
11636                 }
11637
11638                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11639                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11640                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11641                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11642                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11643
11644                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11645                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11646
11647                         tg3_writephy(tp, MII_BMCR,
11648                                      BMCR_ANENABLE | BMCR_ANRESTART);
11649                 }
11650                 tg3_phy_set_wirespeed(tp);
11651
11652                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11653                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11654                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11655         }
11656
11657 skip_phy_reset:
11658         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11659                 err = tg3_init_5401phy_dsp(tp);
11660                 if (err)
11661                         return err;
11662         }
11663
11664         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11665                 err = tg3_init_5401phy_dsp(tp);
11666         }
11667
11668         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11669                 tp->link_config.advertising =
11670                         (ADVERTISED_1000baseT_Half |
11671                          ADVERTISED_1000baseT_Full |
11672                          ADVERTISED_Autoneg |
11673                          ADVERTISED_FIBRE);
11674         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11675                 tp->link_config.advertising &=
11676                         ~(ADVERTISED_1000baseT_Half |
11677                           ADVERTISED_1000baseT_Full);
11678
11679         return err;
11680 }
11681
11682 static void __devinit tg3_read_partno(struct tg3 *tp)
11683 {
11684         unsigned char vpd_data[256];
11685         unsigned int i;
11686         u32 magic;
11687
11688         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11689                 goto out_not_found;
11690
11691         if (magic == TG3_EEPROM_MAGIC) {
11692                 for (i = 0; i < 256; i += 4) {
11693                         u32 tmp;
11694
11695                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11696                                 goto out_not_found;
11697
11698                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11699                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11700                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11701                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11702                 }
11703         } else {
11704                 int vpd_cap;
11705
11706                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11707                 for (i = 0; i < 256; i += 4) {
11708                         u32 tmp, j = 0;
11709                         __le32 v;
11710                         u16 tmp16;
11711
11712                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11713                                               i);
11714                         while (j++ < 100) {
11715                                 pci_read_config_word(tp->pdev, vpd_cap +
11716                                                      PCI_VPD_ADDR, &tmp16);
11717                                 if (tmp16 & 0x8000)
11718                                         break;
11719                                 msleep(1);
11720                         }
11721                         if (!(tmp16 & 0x8000))
11722                                 goto out_not_found;
11723
11724                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11725                                               &tmp);
11726                         v = cpu_to_le32(tmp);
11727                         memcpy(&vpd_data[i], &v, 4);
11728                 }
11729         }
11730
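              /* The VPD image is a series of "large resource" blocks: a one-byte
               * tag (0x82 identifier string, 0x90 read-only data, 0x91 read-write
               * data) followed by a two-byte little-endian length and the payload.
               * Within the 0x90 read-only block each field is a two-character
               * keyword, a one-byte length and its data; the "PN" keyword holds
               * the board part number we are looking for.
               */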
11731         /* Now parse and find the part number. */
11732         for (i = 0; i < 254; ) {
11733                 unsigned char val = vpd_data[i];
11734                 unsigned int block_end;
11735
11736                 if (val == 0x82 || val == 0x91) {
11737                         i = (i + 3 +
11738                              (vpd_data[i + 1] +
11739                               (vpd_data[i + 2] << 8)));
11740                         continue;
11741                 }
11742
11743                 if (val != 0x90)
11744                         goto out_not_found;
11745
11746                 block_end = (i + 3 +
11747                              (vpd_data[i + 1] +
11748                               (vpd_data[i + 2] << 8)));
11749                 i += 3;
11750
11751                 if (block_end > 256)
11752                         goto out_not_found;
11753
11754                 while (i < (block_end - 2)) {
11755                         if (vpd_data[i + 0] == 'P' &&
11756                             vpd_data[i + 1] == 'N') {
11757                                 int partno_len = vpd_data[i + 2];
11758
11759                                 i += 3;
11760                                 if (partno_len > 24 || (partno_len + i) > 256)
11761                                         goto out_not_found;
11762
11763                                 memcpy(tp->board_part_number,
11764                                        &vpd_data[i], partno_len);
11765
11766                                 /* Success. */
11767                                 return;
11768                         }
11769                         i += 3 + vpd_data[i + 2];
11770                 }
11771
11772                 /* Part number not found. */
11773                 goto out_not_found;
11774         }
11775
11776 out_not_found:
11777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11778                 strcpy(tp->board_part_number, "BCM95906");
11779         else
11780                 strcpy(tp->board_part_number, "none");
11781 }
11782
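      /* Sanity check a firmware image header in NVRAM: the first word must
       * carry the 0x0c000000 pattern in its top bits and the second word must
       * be zero before we trust the version information that follows it.
       */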
11783 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11784 {
11785         u32 val;
11786
11787         if (tg3_nvram_read_swab(tp, offset, &val) ||
11788             (val & 0xfc000000) != 0x0c000000 ||
11789             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11790             val != 0)
11791                 return 0;
11792
11793         return 1;
11794 }
11795
11796 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11797 {
11798         u32 val, offset, start;
11799         u32 ver_offset;
11800         int i, bcnt;
11801
11802         if (tg3_nvram_read_swab(tp, 0, &val))
11803                 return;
11804
11805         if (val != TG3_EEPROM_MAGIC)
11806                 return;
11807
11808         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11809             tg3_nvram_read_swab(tp, 0x4, &start))
11810                 return;
11811
11812         offset = tg3_nvram_logical_addr(tp, offset);
11813
11814         if (!tg3_fw_img_is_valid(tp, offset) ||
11815             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11816                 return;
11817
11818         offset = offset + ver_offset - start;
11819         for (i = 0; i < 16; i += 4) {
11820                 __le32 v;
11821                 if (tg3_nvram_read_le(tp, offset + i, &v))
11822                         return;
11823
11824                 memcpy(tp->fw_ver + i, &v, 4);
11825         }
11826
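              /* Only when ASF management firmware is enabled, and the APE is not,
               * do we also locate the ASF init-code entry in the NVRAM directory
               * and append its version after a ", " separator below.
               */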
11827         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11828              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11829                 return;
11830
11831         for (offset = TG3_NVM_DIR_START;
11832              offset < TG3_NVM_DIR_END;
11833              offset += TG3_NVM_DIRENT_SIZE) {
11834                 if (tg3_nvram_read_swab(tp, offset, &val))
11835                         return;
11836
11837                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11838                         break;
11839         }
11840
11841         if (offset == TG3_NVM_DIR_END)
11842                 return;
11843
11844         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11845                 start = 0x08000000;
11846         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11847                 return;
11848
11849         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11850             !tg3_fw_img_is_valid(tp, offset) ||
11851             tg3_nvram_read_swab(tp, offset + 8, &val))
11852                 return;
11853
11854         offset += val - start;
11855
11856         bcnt = strlen(tp->fw_ver);
11857
11858         tp->fw_ver[bcnt++] = ',';
11859         tp->fw_ver[bcnt++] = ' ';
11860
11861         for (i = 0; i < 4; i++) {
11862                 __le32 v;
11863                 if (tg3_nvram_read_le(tp, offset, &v))
11864                         return;
11865
11866                 offset += sizeof(v);
11867
11868                 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11869                         memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11870                         break;
11871                 }
11872
11873                 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11874                 bcnt += sizeof(v);
11875         }
11876
11877         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11878 }
11879
11880 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11881
11882 static int __devinit tg3_get_invariants(struct tg3 *tp)
11883 {
11884         static struct pci_device_id write_reorder_chipsets[] = {
11885                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11886                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11887                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11888                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11889                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11890                              PCI_DEVICE_ID_VIA_8385_0) },
11891                 { },
11892         };
11893         u32 misc_ctrl_reg;
11894         u32 cacheline_sz_reg;
11895         u32 pci_state_reg, grc_misc_cfg;
11896         u32 val;
11897         u16 pci_cmd;
11898         int err, pcie_cap;
11899
11900         /* Force memory write invalidate off.  If we leave it on,
11901          * then on 5700_BX chips we have to enable a workaround.
11902          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11903          * to match the cacheline size.  The Broadcom driver has this
11904          * workaround but turns MWI off all the time, so it never uses
11905          * it.  This seems to suggest that the workaround is insufficient.
11906          */
11907         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11908         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11909         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11910
11911         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11912          * has the register indirect write enable bit set before
11913          * we try to access any of the MMIO registers.  It is also
11914          * critical that the PCI-X hw workaround situation is decided
11915          * before that as well.
11916          */
11917         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11918                               &misc_ctrl_reg);
11919
11920         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11921                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11922         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11923                 u32 prod_id_asic_rev;
11924
11925                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11926                                       &prod_id_asic_rev);
11927                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11928         }
11929
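              /* Everything below keys off this value: per tg3.h, GET_ASIC_REV()
               * is roughly pci_chip_rev_id >> 12 and GET_CHIP_REV() is
               * pci_chip_rev_id >> 8, so the ASIC and metal revision checks all
               * derive from the value read above.
               */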
11930         /* Wrong chip ID in 5752 A0. This code can be removed later
11931          * as A0 is not in production.
11932          */
11933         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11934                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11935
11936         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11937          * we need to disable memory and use config. cycles
11938          * only to access all registers. The 5702/03 chips
11939          * can mistakenly decode the special cycles from the
11940          * ICH chipsets as memory write cycles, causing corruption
11941          * of register and memory space. Only certain ICH bridges
11942          * will drive special cycles with non-zero data during the
11943          * address phase which can fall within the 5703's address
11944          * range. This is not an ICH bug as the PCI spec allows
11945          * non-zero address during special cycles. However, only
11946          * these ICH bridges are known to drive non-zero addresses
11947          * during special cycles.
11948          *
11949          * Since special cycles do not cross PCI bridges, we only
11950          * enable this workaround if the 5703 is on the secondary
11951          * bus of these ICH bridges.
11952          */
11953         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11954             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11955                 static struct tg3_dev_id {
11956                         u32     vendor;
11957                         u32     device;
11958                         u32     rev;
11959                 } ich_chipsets[] = {
11960                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11961                           PCI_ANY_ID },
11962                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11963                           PCI_ANY_ID },
11964                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11965                           0xa },
11966                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11967                           PCI_ANY_ID },
11968                         { },
11969                 };
11970                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11971                 struct pci_dev *bridge = NULL;
11972
11973                 while (pci_id->vendor != 0) {
11974                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11975                                                 bridge);
11976                         if (!bridge) {
11977                                 pci_id++;
11978                                 continue;
11979                         }
11980                         if (pci_id->rev != PCI_ANY_ID) {
11981                                 if (bridge->revision > pci_id->rev)
11982                                         continue;
11983                         }
11984                         if (bridge->subordinate &&
11985                             (bridge->subordinate->number ==
11986                              tp->pdev->bus->number)) {
11987
11988                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11989                                 pci_dev_put(bridge);
11990                                 break;
11991                         }
11992                 }
11993         }
11994
11995         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11996                 static struct tg3_dev_id {
11997                         u32     vendor;
11998                         u32     device;
11999                 } bridge_chipsets[] = {
12000                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12001                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12002                         { },
12003                 };
12004                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12005                 struct pci_dev *bridge = NULL;
12006
12007                 while (pci_id->vendor != 0) {
12008                         bridge = pci_get_device(pci_id->vendor,
12009                                                 pci_id->device,
12010                                                 bridge);
12011                         if (!bridge) {
12012                                 pci_id++;
12013                                 continue;
12014                         }
12015                         if (bridge->subordinate &&
12016                             (bridge->subordinate->number <=
12017                              tp->pdev->bus->number) &&
12018                             (bridge->subordinate->subordinate >=
12019                              tp->pdev->bus->number)) {
12020                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12021                                 pci_dev_put(bridge);
12022                                 break;
12023                         }
12024                 }
12025         }
12026
12027         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12028          * DMA addresses > 40-bit. This bridge may have additional
12029          * 57xx devices behind it, in some 4-port NIC designs for example.
12030          * Any tg3 device found behind the bridge will also need the 40-bit
12031          * DMA workaround.
12032          */
12033         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12035                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12036                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12037                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12038         }
12039         else {
12040                 struct pci_dev *bridge = NULL;
12041
12042                 do {
12043                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12044                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12045                                                 bridge);
12046                         if (bridge && bridge->subordinate &&
12047                             (bridge->subordinate->number <=
12048                              tp->pdev->bus->number) &&
12049                             (bridge->subordinate->subordinate >=
12050                              tp->pdev->bus->number)) {
12051                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12052                                 pci_dev_put(bridge);
12053                                 break;
12054                         }
12055                 } while (bridge);
12056         }
12057
12058         /* Initialize misc host control in PCI block. */
12059         tp->misc_host_ctrl |= (misc_ctrl_reg &
12060                                MISC_HOST_CTRL_CHIPREV);
12061         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12062                                tp->misc_host_ctrl);
12063
12064         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12065                               &cacheline_sz_reg);
12066
12067         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12068         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12069         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12070         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12071
12072         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12073             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12074                 tp->pdev_peer = tg3_find_peer(tp);
12075
12076         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12078             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12079             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12080             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12081             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12082             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12083             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12084             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12085                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12086
12087         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12088             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12089                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12090
12091         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12092                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12093                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12094                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12095                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12096                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12097                      tp->pdev_peer == tp->pdev))
12098                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12099
12100                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12101                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12102                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12103                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12104                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12105                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12106                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12107                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12108                 } else {
12109                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12110                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12111                                 ASIC_REV_5750 &&
12112                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12113                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12114                 }
12115         }
12116
12117         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12118              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12119                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12120
12121         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12122         if (pcie_cap != 0) {
12123                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12124
12125                 pcie_set_readrq(tp->pdev, 4096);
12126
12127                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12128                         u16 lnkctl;
12129
12130                         pci_read_config_word(tp->pdev,
12131                                              pcie_cap + PCI_EXP_LNKCTL,
12132                                              &lnkctl);
12133                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12134                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12135                 }
12136         }
12137
12138         /* If we have an AMD 762 or VIA K8T800 chipset, write
12139          * reordering to the mailbox registers done by the host
12140          * controller can cause major trouble.  We read back from
12141          * every mailbox register write to force the writes to be
12142          * posted to the chip in order.
12143          */
12144         if (pci_dev_present(write_reorder_chipsets) &&
12145             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12146                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12147
12148         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12149             tp->pci_lat_timer < 64) {
12150                 tp->pci_lat_timer = 64;
12151
12152                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12153                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12154                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12155                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12156
12157                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12158                                        cacheline_sz_reg);
12159         }
12160
12161         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12162             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12163                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12164                 if (!tp->pcix_cap) {
12165                         printk(KERN_ERR PFX "Cannot find PCI-X "
12166                                             "capability, aborting.\n");
12167                         return -EIO;
12168                 }
12169         }
12170
12171         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12172                               &pci_state_reg);
12173
12174         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12175                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12176
12177                 /* If this is a 5700 BX chipset, and we are in PCI-X
12178                  * mode, enable register write workaround.
12179                  *
12180                  * The workaround is to use indirect register accesses
12181                  * for all chip writes not to mailbox registers.
12182                  */
12183                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12184                         u32 pm_reg;
12185
12186                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12187
12188                         /* The chip can have its power management PCI config
12189                          * space registers clobbered due to this bug.
12190                          * So explicitly force the chip into D0 here.
12191                          */
12192                         pci_read_config_dword(tp->pdev,
12193                                               tp->pm_cap + PCI_PM_CTRL,
12194                                               &pm_reg);
12195                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12196                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12197                         pci_write_config_dword(tp->pdev,
12198                                                tp->pm_cap + PCI_PM_CTRL,
12199                                                pm_reg);
12200
12201                         /* Also, force SERR#/PERR# in PCI command. */
12202                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12203                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12204                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12205                 }
12206         }
12207
12208         /* 5700 BX chips need to have their TX producer index mailboxes
12209          * written twice to work around a bug.
12210          */
12211         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12212                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12213
12214         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12215                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12216         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12217                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12218
12219         /* Chip-specific fixup from Broadcom driver */
12220         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12221             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12222                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12223                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12224         }
12225
12226         /* Default fast path register access methods */
12227         tp->read32 = tg3_read32;
12228         tp->write32 = tg3_write32;
12229         tp->read32_mbox = tg3_read32;
12230         tp->write32_mbox = tg3_write32;
12231         tp->write32_tx_mbox = tg3_write32;
12232         tp->write32_rx_mbox = tg3_write32;
12233
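              /* tp->read32/write32 and the mailbox accessors are function
               * pointers; the appropriate variants are selected once here so the
               * hot paths do not have to re-test for chip bugs on every access.
               */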
12234         /* Various workaround register access methods */
12235         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12236                 tp->write32 = tg3_write_indirect_reg32;
12237         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12238                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12239                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12240                 /*
12241                  * Back-to-back register writes can cause problems on these
12242                  * chips; the workaround is to read back all reg writes
12243                  * except those to mailbox regs.
12244                  *
12245                  * See tg3_write_indirect_reg32().
12246                  */
12247                 tp->write32 = tg3_write_flush_reg32;
12248         }
12249
12250
12251         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12252             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12253                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12254                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12255                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12256         }
12257
12258         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12259                 tp->read32 = tg3_read_indirect_reg32;
12260                 tp->write32 = tg3_write_indirect_reg32;
12261                 tp->read32_mbox = tg3_read_indirect_mbox;
12262                 tp->write32_mbox = tg3_write_indirect_mbox;
12263                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12264                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12265
12266                 iounmap(tp->regs);
12267                 tp->regs = NULL;
12268
12269                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12270                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12271                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12272         }
12273         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12274                 tp->read32_mbox = tg3_read32_mbox_5906;
12275                 tp->write32_mbox = tg3_write32_mbox_5906;
12276                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12277                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12278         }
12279
12280         if (tp->write32 == tg3_write_indirect_reg32 ||
12281             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12282              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12283               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12284                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12285
12286         /* Get eeprom hw config before calling tg3_set_power_state().
12287          * In particular, the TG3_FLG2_IS_NIC flag must be
12288          * determined before calling tg3_set_power_state() so that
12289          * we know whether or not to switch out of Vaux power.
12290          * When the flag is set, it means that GPIO1 is used for eeprom
12291          * write protect and also implies that it is a LOM where GPIOs
12292          * are not used to switch power.
12293          */
12294         tg3_get_eeprom_hw_cfg(tp);
12295
12296         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12297                 /* Allow reads and writes to the
12298                  * APE register and memory space.
12299                  */
12300                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12301                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12302                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12303                                        pci_state_reg);
12304         }
12305
12306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12307             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12308             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12309                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12310
12311         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12312          * GPIO1 driven high will bring 5700's external PHY out of reset.
12313          * It is also used as eeprom write protect on LOMs.
12314          */
12315         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12316         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12317             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12318                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12319                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12320         /* Unused GPIO3 must be driven as output on 5752 because there
12321          * are no pull-up resistors on unused GPIO pins.
12322          */
12323         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12324                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12325
12326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12327                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12328
12329         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12330                 /* Turn off the debug UART. */
12331                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12332                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12333                         /* Keep VMain power. */
12334                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12335                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12336         }
12337
12338         /* Force the chip into D0. */
12339         err = tg3_set_power_state(tp, PCI_D0);
12340         if (err) {
12341                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12342                        pci_name(tp->pdev));
12343                 return err;
12344         }
12345
12346         /* 5700 B0 chips do not support checksumming correctly due
12347          * to hardware bugs.
12348          */
12349         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12350                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12351
12352         /* Derive initial jumbo mode from MTU assigned in
12353          * ether_setup() via the alloc_etherdev() call
12354          */
12355         if (tp->dev->mtu > ETH_DATA_LEN &&
12356             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12357                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12358
12359         /* Determine WakeOnLan speed to use. */
12360         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12361             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12362             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12363             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12364                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12365         } else {
12366                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12367         }
12368
12369         /* A few boards don't want the Ethernet@WireSpeed phy feature */
12370         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12371             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12372              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12373              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12374             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12375             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12376                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12377
12378         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12379             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12380                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12381         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12382                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12383
12384         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12385                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12386                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12387                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12388                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12389                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12390                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12391                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12392                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12393                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12394                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12395                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12396                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12397         }
12398
12399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12400             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12401                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12402                 if (tp->phy_otp == 0)
12403                         tp->phy_otp = TG3_OTP_DEFAULT;
12404         }
12405
12406         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12407                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12408         else
12409                 tp->mi_mode = MAC_MI_MODE_BASE;
12410
12411         tp->coalesce_mode = 0;
12412         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12413             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12414                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12415
12416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12417                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12418
12419         err = tg3_mdio_init(tp);
12420         if (err)
12421                 return err;
12422
12423         /* Initialize data/descriptor byte/word swapping. */
12424         val = tr32(GRC_MODE);
12425         val &= GRC_MODE_HOST_STACKUP;
12426         tw32(GRC_MODE, val | tp->grc_mode);
12427
12428         tg3_switch_clocks(tp);
12429
12430         /* Clear this out for sanity. */
12431         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12432
12433         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12434                               &pci_state_reg);
12435         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12436             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12437                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12438
12439                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12440                     chiprevid == CHIPREV_ID_5701_B0 ||
12441                     chiprevid == CHIPREV_ID_5701_B2 ||
12442                     chiprevid == CHIPREV_ID_5701_B5) {
12443                         void __iomem *sram_base;
12444
12445                         /* Write some dummy words into the SRAM status block
12446                          * area and see if it reads back correctly.  If the return
12447                          * value is bad, force-enable the PCIX workaround.
12448                          */
12449                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12450
12451                         writel(0x00000000, sram_base);
12452                         writel(0x00000000, sram_base + 4);
12453                         writel(0xffffffff, sram_base + 4);
12454                         if (readl(sram_base) != 0x00000000)
12455                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12456                 }
12457         }
12458
12459         udelay(50);
12460         tg3_nvram_init(tp);
12461
12462         grc_misc_cfg = tr32(GRC_MISC_CFG);
12463         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12464
12465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12466             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12467              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12468                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12469
12470         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12471             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12472                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12473         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12474                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12475                                       HOSTCC_MODE_CLRTICK_TXBD);
12476
12477                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12478                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12479                                        tp->misc_host_ctrl);
12480         }
12481
12482         /* Preserve the APE MAC_MODE bits */
12483         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12484                 tp->mac_mode = tr32(MAC_MODE) |
12485                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12486         else
12487                 tp->mac_mode = TG3_DEF_MAC_MODE;
12488
12489         /* these are limited to 10/100 only */
12490         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12491              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12492             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12493              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12494              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12495               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12496               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12497             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12498              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12499               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12500               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12501             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12502                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12503
12504         err = tg3_phy_probe(tp);
12505         if (err) {
12506                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12507                        pci_name(tp->pdev), err);
12508                 /* ... but do not return immediately ... */
12509                 tg3_mdio_fini(tp);
12510         }
12511
12512         tg3_read_partno(tp);
12513         tg3_read_fw_ver(tp);
12514
12515         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12516                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12517         } else {
12518                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12519                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12520                 else
12521                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12522         }
12523
12524         /* 5700 {AX,BX} chips have a broken status block link
12525          * change bit implementation, so we must use the
12526          * status register in those cases.
12527          */
12528         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12529                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12530         else
12531                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12532
12533         /* The led_ctrl is set during tg3_phy_probe; here we might
12534          * have to force the link status polling mechanism based
12535          * upon subsystem IDs.
12536          */
12537         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12539             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12540                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12541                                   TG3_FLAG_USE_LINKCHG_REG);
12542         }
12543
12544         /* For all SERDES we poll the MAC status register. */
12545         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12546                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12547         else
12548                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12549
12550         /* All chips before 5787 can get confused if TX buffers
12551          * straddle the 4GB address boundary in some cases.
12552          */
12553         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12558             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12559                 tp->dev->hard_start_xmit = tg3_start_xmit;
12560         else
12561                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12562
12563         tp->rx_offset = 2;
12564         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12565             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12566                 tp->rx_offset = 0;
12567
12568         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12569
12570         /* Increment the rx prod index on the rx std ring by at most
12571          * 8 for these chips to workaround hw errata.
12572          * 8 for these chips to work around hw errata.
12573         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12576                 tp->rx_std_max_post = 8;
12577
12578         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12579                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12580                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12581
12582         return err;
12583 }
12584
12585 #ifdef CONFIG_SPARC
12586 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12587 {
12588         struct net_device *dev = tp->dev;
12589         struct pci_dev *pdev = tp->pdev;
12590         struct device_node *dp = pci_device_to_OF_node(pdev);
12591         const unsigned char *addr;
12592         int len;
12593
12594         addr = of_get_property(dp, "local-mac-address", &len);
12595         if (addr && len == 6) {
12596                 memcpy(dev->dev_addr, addr, 6);
12597                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12598                 return 0;
12599         }
12600         return -ENODEV;
12601 }
12602
12603 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12604 {
12605         struct net_device *dev = tp->dev;
12606
12607         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12608         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12609         return 0;
12610 }
12611 #endif
12612
12613 static int __devinit tg3_get_device_address(struct tg3 *tp)
12614 {
12615         struct net_device *dev = tp->dev;
12616         u32 hi, lo, mac_offset;
12617         int addr_ok = 0;
12618
12619 #ifdef CONFIG_SPARC
12620         if (!tg3_get_macaddr_sparc(tp))
12621                 return 0;
12622 #endif
12623
12624         mac_offset = 0x7c;
12625         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12626             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12627                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12628                         mac_offset = 0xcc;
12629                 if (tg3_nvram_lock(tp))
12630                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12631                 else
12632                         tg3_nvram_unlock(tp);
12633         }
12634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12635                 mac_offset = 0x10;
12636
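              /* The 0x484b value tested below is ASCII 'H','K'; it appears to be
               * the marker the bootcode leaves in the high mailbox word when it
               * has stored a MAC address in SRAM.
               */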
12637         /* First try to get it from MAC address mailbox. */
12638         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12639         if ((hi >> 16) == 0x484b) {
12640                 dev->dev_addr[0] = (hi >>  8) & 0xff;
12641                 dev->dev_addr[1] = (hi >>  0) & 0xff;
12642
12643                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12644                 dev->dev_addr[2] = (lo >> 24) & 0xff;
12645                 dev->dev_addr[3] = (lo >> 16) & 0xff;
12646                 dev->dev_addr[4] = (lo >>  8) & 0xff;
12647                 dev->dev_addr[5] = (lo >>  0) & 0xff;
12648
12649                 /* Some old bootcode may report a 0 MAC address in SRAM */
12650                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12651         }
12652         if (!addr_ok) {
12653                 /* Next, try NVRAM. */
12654                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12655                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12656                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
12657                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
12658                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
12659                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
12660                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
12661                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
12662                 }
12663                 /* Finally just fetch it out of the MAC control regs. */
12664                 else {
12665                         hi = tr32(MAC_ADDR_0_HIGH);
12666                         lo = tr32(MAC_ADDR_0_LOW);
12667
12668                         dev->dev_addr[5] = lo & 0xff;
12669                         dev->dev_addr[4] = (lo >> 8) & 0xff;
12670                         dev->dev_addr[3] = (lo >> 16) & 0xff;
12671                         dev->dev_addr[2] = (lo >> 24) & 0xff;
12672                         dev->dev_addr[1] = hi & 0xff;
12673                         dev->dev_addr[0] = (hi >> 8) & 0xff;
12674                 }
12675         }
12676
12677         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12678 #ifdef CONFIG_SPARC
12679                 if (!tg3_get_default_macaddr_sparc(tp))
12680                         return 0;
12681 #endif
12682                 return -EINVAL;
12683         }
12684         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12685         return 0;
12686 }
12687
12688 #define BOUNDARY_SINGLE_CACHELINE       1
12689 #define BOUNDARY_MULTI_CACHELINE        2
12690
12691 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12692 {
12693         int cacheline_size;
12694         u8 byte;
12695         int goal;
12696
12697         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12698         if (byte == 0)
12699                 cacheline_size = 1024;
12700         else
12701                 cacheline_size = (int) byte * 4;
12702
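              /* PCI_CACHE_LINE_SIZE is in 32-bit words, so e.g. a register value
               * of 0x10 means a 64-byte cache line; a value of zero is treated as
               * the 1024-byte worst case.
               */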
12703         /* On 5703 and later chips, the boundary bits have no
12704          * effect.
12705          */
12706         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12707             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12708             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12709                 goto out;
12710
12711 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12712         goal = BOUNDARY_MULTI_CACHELINE;
12713 #else
12714 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12715         goal = BOUNDARY_SINGLE_CACHELINE;
12716 #else
12717         goal = 0;
12718 #endif
12719 #endif
12720
12721         if (!goal)
12722                 goto out;
12723
12724         /* PCI controllers on most RISC systems tend to disconnect
12725          * when a device tries to burst across a cache-line boundary.
12726          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12727          *
12728          * Unfortunately, for PCI-E there are only limited
12729          * write-side controls for this, and thus for reads
12730          * we will still get the disconnects.  We'll also waste
12731          * these PCI cycles for both read and write for chips
12732          * other than 5700 and 5701, which do not implement the
12733          * boundary bits.
12734          */
12735         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12736             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12737                 switch (cacheline_size) {
12738                 case 16:
12739                 case 32:
12740                 case 64:
12741                 case 128:
12742                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12743                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12744                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12745                         } else {
12746                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12747                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12748                         }
12749                         break;
12750
12751                 case 256:
12752                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12753                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12754                         break;
12755
12756                 default:
12757                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12758                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12759                         break;
12760                 }
12761         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12762                 switch (cacheline_size) {
12763                 case 16:
12764                 case 32:
12765                 case 64:
12766                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12767                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12768                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12769                                 break;
12770                         }
12771                         /* fallthrough */
12772                 case 128:
12773                 default:
12774                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12775                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12776                         break;
12777                 }
12778         } else {
12779                 switch (cacheline_size) {
12780                 case 16:
12781                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12782                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12783                                         DMA_RWCTRL_WRITE_BNDRY_16);
12784                                 break;
12785                         }
12786                         /* fallthrough */
12787                 case 32:
12788                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12789                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12790                                         DMA_RWCTRL_WRITE_BNDRY_32);
12791                                 break;
12792                         }
12793                         /* fallthrough */
12794                 case 64:
12795                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12796                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12797                                         DMA_RWCTRL_WRITE_BNDRY_64);
12798                                 break;
12799                         }
12800                         /* fallthrough */
12801                 case 128:
12802                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12803                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12804                                         DMA_RWCTRL_WRITE_BNDRY_128);
12805                                 break;
12806                         }
12807                         /* fallthrough */
12808                 case 256:
12809                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12810                                 DMA_RWCTRL_WRITE_BNDRY_256);
12811                         break;
12812                 case 512:
12813                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12814                                 DMA_RWCTRL_WRITE_BNDRY_512);
12815                         break;
12816                 case 1024:
12817                 default:
12818                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12819                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12820                         break;
12821                 }
12822         }
12823
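              /* For example, plain PCI with a 64-byte cache line and
               * goal == BOUNDARY_SINGLE_CACHELINE ends up selecting
               * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64 above.
               */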
12824 out:
12825         return val;
12826 }
12827
12828 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12829 {
12830         struct tg3_internal_buffer_desc test_desc;
12831         u32 sram_dma_descs;
12832         int i, ret;
12833
12834         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12835
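              /* Zero the completion FIFO pointers, the read/write DMA status,
               * the buffer manager mode and the FTQ reset register, then hand
               * the DMA engine one hand-built descriptor placed in NIC SRAM.
               */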
12836         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12837         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12838         tw32(RDMAC_STATUS, 0);
12839         tw32(WDMAC_STATUS, 0);
12840
12841         tw32(BUFMGR_MODE, 0);
12842         tw32(FTQ_RESET, 0);
12843
12844         test_desc.addr_hi = ((u64) buf_dma) >> 32;
12845         test_desc.addr_lo = buf_dma & 0xffffffff;
12846         test_desc.nic_mbuf = 0x00002100;
12847         test_desc.len = size;
12848
12849         /*
12850          * HP ZX1 systems were seeing test failures for 5701 cards running
12851          * at 33MHz the *second* time the tg3 driver was loaded after an
12852          * initial scan.
12853          *
12854          * Broadcom tells me:
12855          *   ...the DMA engine is connected to the GRC block and a DMA
12856          *   reset may affect the GRC block in some unpredictable way...
12857          *   The behavior of resets to individual blocks has not been tested.
12858          *
12859          * Broadcom noted the GRC reset will also reset all sub-components.
12860          */
12861         if (to_device) {
12862                 test_desc.cqid_sqid = (13 << 8) | 2;
12863
12864                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12865                 udelay(40);
12866         } else {
12867                 test_desc.cqid_sqid = (16 << 8) | 7;
12868
12869                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12870                 udelay(40);
12871         }
12872         test_desc.flags = 0x00000005;
12873
12874         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12875                 u32 val;
12876
12877                 val = *(((u32 *)&test_desc) + i);
12878                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12879                                        sram_dma_descs + (i * sizeof(u32)));
12880                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12881         }
12882         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12883
12884         if (to_device) {
12885                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12886         } else {
12887                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12888         }
12889
12890         ret = -ENODEV;
12891         for (i = 0; i < 40; i++) {
12892                 u32 val;
12893
12894                 if (to_device)
12895                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12896                 else
12897                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12898                 if ((val & 0xffff) == sram_dma_descs) {
12899                         ret = 0;
12900                         break;
12901                 }
12902
12903                 udelay(100);
12904         }
12905
12906         return ret;
12907 }
12908
12909 #define TEST_BUFFER_SIZE        0x2000
12910
12911 static int __devinit tg3_test_dma(struct tg3 *tp)
12912 {
12913         dma_addr_t buf_dma;
12914         u32 *buf, saved_dma_rwctrl;
12915         int ret;
12916
12917         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12918         if (!buf) {
12919                 ret = -ENOMEM;
12920                 goto out_nofree;
12921         }
12922
12923         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12924                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12925
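              /* Start from the default PCI read/write command codes, then fold in
               * the DMA boundary bits derived from the host cache line size and
               * the chip-specific watermark values chosen below.
               */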
12926         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12927
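        /* Pick DMA read/write watermarks to suit the bus type.  The
         * magic values below are presumably Broadcom-recommended
         * settings for each chip family and bus mode.
         */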
12928         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12929                 /* DMA read watermark not used on PCIE */
12930                 tp->dma_rwctrl |= 0x00180000;
12931         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12932                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12933                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12934                         tp->dma_rwctrl |= 0x003f0000;
12935                 else
12936                         tp->dma_rwctrl |= 0x003f000f;
12937         } else {
12938                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12939                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12940                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12941                         u32 read_water = 0x7;
12942
12943                         /* If the 5704 is behind the EPB bridge, we can
12944                          * do the less restrictive ONE_DMA workaround for
12945                          * better performance.
12946                          */
12947                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12948                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12949                                 tp->dma_rwctrl |= 0x8000;
12950                         else if (ccval == 0x6 || ccval == 0x7)
12951                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12952
12953                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12954                                 read_water = 4;
12955                         /* Set bit 23 to enable PCIX hw bug fix */
12956                         tp->dma_rwctrl |=
12957                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12958                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12959                                 (1 << 23);
12960                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12961                         /* 5780 always in PCIX mode */
12962                         tp->dma_rwctrl |= 0x00144000;
12963                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12964                         /* 5714 always in PCIX mode */
12965                         tp->dma_rwctrl |= 0x00148000;
12966                 } else {
12967                         tp->dma_rwctrl |= 0x001b000f;
12968                 }
12969         }
12970
12971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12973                 tp->dma_rwctrl &= 0xfffffff0;
12974
12975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12977                 /* Remove this if it causes problems for some boards. */
12978                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12979
12980                 /* On 5700/5701 chips, we need to set this bit.
12981                  * Otherwise the chip will issue cacheline transactions
12982                  * to streamable DMA memory with not all the byte
12983                  * enables turned on.  This is an error on several
12984                  * RISC PCI controllers, in particular sparc64.
12985                  *
12986                  * On 5703/5704 chips, this bit has been reassigned
12987                  * a different meaning.  In particular, it is used
12988                  * on those chips to enable a PCI-X workaround.
12989                  */
12990                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12991         }
12992
12993         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12994
12995 #if 0
12996         /* Unneeded, already done by tg3_get_invariants.  */
12997         tg3_switch_clocks(tp);
12998 #endif
12999
13000         ret = 0;
13001         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13002             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13003                 goto out;
13004
13005         /* It is best to perform DMA test with maximum write burst size
13006          * to expose the 5700/5701 write DMA bug.
13007          */
13008         saved_dma_rwctrl = tp->dma_rwctrl;
13009         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13010         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13011
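        /* Fill the buffer with an incrementing pattern, DMA it to the
         * chip and back, and verify the result.  If the data comes back
         * corrupted, retry with the write boundary forced to 16 bytes.
         */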
13012         while (1) {
13013                 u32 *p = buf, i;
13014
13015                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13016                         p[i] = i;
13017
13018                 /* Send the buffer to the chip. */
13019                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13020                 if (ret) {
13021                         printk(KERN_ERR "tg3_test_dma() write of test buffer failed %d\n", ret);
13022                         break;
13023                 }
13024
13025 #if 0
13026                 /* validate data reached card RAM correctly. */
13027                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13028                         u32 val;
13029                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
13030                         if (le32_to_cpu(val) != p[i]) {
13031                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
13032                                 /* ret = -ENODEV here? */
13033                         }
13034                         p[i] = 0;
13035                 }
13036 #endif
13037                 /* Now read it back. */
13038                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13039                 if (ret) {
13040                         printk(KERN_ERR "tg3_test_dma() read of test buffer failed %d\n", ret);
13042                         break;
13043                 }
13044
13045                 /* Verify it. */
13046                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13047                         if (p[i] == i)
13048                                 continue;
13049
13050                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13051                             DMA_RWCTRL_WRITE_BNDRY_16) {
13052                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13053                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13054                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13055                                 break;
13056                         } else {
13057                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13058                                 ret = -ENODEV;
13059                                 goto out;
13060                         }
13061                 }
13062
13063                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13064                         /* Success. */
13065                         ret = 0;
13066                         break;
13067                 }
13068         }
13069         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13070             DMA_RWCTRL_WRITE_BNDRY_16) {
13071                 static struct pci_device_id dma_wait_state_chipsets[] = {
13072                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13073                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13074                         { },
13075                 };
13076
13077                 /* DMA test passed without adjusting DMA boundary,
13078                  * now look for chipsets that are known to expose the
13079                  * DMA bug without failing the test.
13080                  */
13081                 if (pci_dev_present(dma_wait_state_chipsets)) {
13082                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13083                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13084                 }
13085                 else
13086                         /* Safe to use the calculated DMA boundary. */
13087                         tp->dma_rwctrl = saved_dma_rwctrl;
13088
13089                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13090         }
13091
13092 out:
13093         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13094 out_nofree:
13095         return ret;
13096 }
13097
13098 static void __devinit tg3_init_link_config(struct tg3 *tp)
13099 {
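        /* Default to autonegotiation with all 10/100/1000 modes
         * advertised; active and original speed/duplex start out
         * invalid.
         */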
13100         tp->link_config.advertising =
13101                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13102                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13103                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13104                  ADVERTISED_Autoneg | ADVERTISED_MII);
13105         tp->link_config.speed = SPEED_INVALID;
13106         tp->link_config.duplex = DUPLEX_INVALID;
13107         tp->link_config.autoneg = AUTONEG_ENABLE;
13108         tp->link_config.active_speed = SPEED_INVALID;
13109         tp->link_config.active_duplex = DUPLEX_INVALID;
13110         tp->link_config.phy_is_low_power = 0;
13111         tp->link_config.orig_speed = SPEED_INVALID;
13112         tp->link_config.orig_duplex = DUPLEX_INVALID;
13113         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13114 }
13115
13116 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13117 {
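        /* 5705 and newer chips get their own buffer manager watermark
         * defaults, with the 5906 overriding the MAC RX low water and
         * high water values.
         */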
13118         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13119                 tp->bufmgr_config.mbuf_read_dma_low_water =
13120                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13121                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13122                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13123                 tp->bufmgr_config.mbuf_high_water =
13124                         DEFAULT_MB_HIGH_WATER_5705;
13125                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13126                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13127                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13128                         tp->bufmgr_config.mbuf_high_water =
13129                                 DEFAULT_MB_HIGH_WATER_5906;
13130                 }
13131
13132                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13133                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13134                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13135                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13136                 tp->bufmgr_config.mbuf_high_water_jumbo =
13137                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13138         } else {
13139                 tp->bufmgr_config.mbuf_read_dma_low_water =
13140                         DEFAULT_MB_RDMA_LOW_WATER;
13141                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13142                         DEFAULT_MB_MACRX_LOW_WATER;
13143                 tp->bufmgr_config.mbuf_high_water =
13144                         DEFAULT_MB_HIGH_WATER;
13145
13146                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13147                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13148                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13149                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13150                 tp->bufmgr_config.mbuf_high_water_jumbo =
13151                         DEFAULT_MB_HIGH_WATER_JUMBO;
13152         }
13153
13154         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13155         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13156 }
13157
13158 static char * __devinit tg3_phy_string(struct tg3 *tp)
13159 {
13160         switch (tp->phy_id & PHY_ID_MASK) {
13161         case PHY_ID_BCM5400:    return "5400";
13162         case PHY_ID_BCM5401:    return "5401";
13163         case PHY_ID_BCM5411:    return "5411";
13164         case PHY_ID_BCM5701:    return "5701";
13165         case PHY_ID_BCM5703:    return "5703";
13166         case PHY_ID_BCM5704:    return "5704";
13167         case PHY_ID_BCM5705:    return "5705";
13168         case PHY_ID_BCM5750:    return "5750";
13169         case PHY_ID_BCM5752:    return "5752";
13170         case PHY_ID_BCM5714:    return "5714";
13171         case PHY_ID_BCM5780:    return "5780";
13172         case PHY_ID_BCM5755:    return "5755";
13173         case PHY_ID_BCM5787:    return "5787";
13174         case PHY_ID_BCM5784:    return "5784";
13175         case PHY_ID_BCM5756:    return "5722/5756";
13176         case PHY_ID_BCM5906:    return "5906";
13177         case PHY_ID_BCM5761:    return "5761";
13178         case PHY_ID_BCM8002:    return "8002/serdes";
13179         case 0:                 return "serdes";
13180         default:                return "unknown";
13181         }
13182 }
13183
13184 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13185 {
13186         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13187                 strcpy(str, "PCI Express");
13188                 return str;
13189         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13190                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13191
13192                 strcpy(str, "PCIX:");
13193
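                /* The low 5 bits of CLOCK_CTRL encode the PCI-X bus
                 * speed; 5704 CIOBE boards are always reported as
                 * 133MHz.
                 */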
13194                 if ((clock_ctrl == 7) ||
13195                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13196                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13197                         strcat(str, "133MHz");
13198                 else if (clock_ctrl == 0)
13199                         strcat(str, "33MHz");
13200                 else if (clock_ctrl == 2)
13201                         strcat(str, "50MHz");
13202                 else if (clock_ctrl == 4)
13203                         strcat(str, "66MHz");
13204                 else if (clock_ctrl == 6)
13205                         strcat(str, "100MHz");
13206         } else {
13207                 strcpy(str, "PCI:");
13208                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13209                         strcat(str, "66MHz");
13210                 else
13211                         strcat(str, "33MHz");
13212         }
13213         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13214                 strcat(str, ":32-bit");
13215         else
13216                 strcat(str, ":64-bit");
13217         return str;
13218 }
13219
13220 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13221 {
13222         struct pci_dev *peer;
13223         unsigned int func, devnr = tp->pdev->devfn & ~7;
13224
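        /* Walk the other functions in this slot looking for the second
         * port of a dual-port device.
         */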
13225         for (func = 0; func < 8; func++) {
13226                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13227                 if (peer && peer != tp->pdev)
13228                         break;
13229                 pci_dev_put(peer);
13230         }
13231         /* 5704 can be configured in single-port mode, set peer to
13232          * tp->pdev in that case.
13233          */
13234         if (!peer) {
13235                 peer = tp->pdev;
13236                 return peer;
13237         }
13238
13239         /*
13240          * We don't need to keep the refcount elevated; there's no way
13241          * to remove one half of this device without removing the other
13242          */
13243         pci_dev_put(peer);
13244
13245         return peer;
13246 }
13247
13248 static void __devinit tg3_init_coal(struct tg3 *tp)
13249 {
13250         struct ethtool_coalesce *ec = &tp->coal;
13251
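        /* Default interrupt coalescing parameters as reported through
         * ETHTOOL_GCOALESCE; the per-IRQ and statistics block values
         * are cleared for 5705+ chips below.
         */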
13252         memset(ec, 0, sizeof(*ec));
13253         ec->cmd = ETHTOOL_GCOALESCE;
13254         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13255         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13256         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13257         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13258         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13259         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13260         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13261         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13262         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13263
13264         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13265                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13266                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13267                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13268                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13269                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13270         }
13271
13272         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13273                 ec->rx_coalesce_usecs_irq = 0;
13274                 ec->tx_coalesce_usecs_irq = 0;
13275                 ec->stats_block_coalesce_usecs = 0;
13276         }
13277 }
13278
13279 static int __devinit tg3_init_one(struct pci_dev *pdev,
13280                                   const struct pci_device_id *ent)
13281 {
13282         static int tg3_version_printed = 0;
13283         resource_size_t tg3reg_len;
13284         struct net_device *dev;
13285         struct tg3 *tp;
13286         int err, pm_cap;
13287         char str[40];
13288         u64 dma_mask, persist_dma_mask;
13289
13290         if (tg3_version_printed++ == 0)
13291                 printk(KERN_INFO "%s", version);
13292
13293         err = pci_enable_device(pdev);
13294         if (err) {
13295                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13296                        "aborting.\n");
13297                 return err;
13298         }
13299
13300         if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
13301                 printk(KERN_ERR PFX "Cannot find proper PCI device "
13302                        "base address, aborting.\n");
13303                 err = -ENODEV;
13304                 goto err_out_disable_pdev;
13305         }
13306
13307         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13308         if (err) {
13309                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13310                        "aborting.\n");
13311                 goto err_out_disable_pdev;
13312         }
13313
13314         pci_set_master(pdev);
13315
13316         /* Find power-management capability. */
13317         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13318         if (pm_cap == 0) {
13319                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13320                        "aborting.\n");
13321                 err = -EIO;
13322                 goto err_out_free_res;
13323         }
13324
13325         dev = alloc_etherdev(sizeof(*tp));
13326         if (!dev) {
13327                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13328                 err = -ENOMEM;
13329                 goto err_out_free_res;
13330         }
13331
13332         SET_NETDEV_DEV(dev, &pdev->dev);
13333
13334 #if TG3_VLAN_TAG_USED
13335         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13336         dev->vlan_rx_register = tg3_vlan_rx_register;
13337 #endif
13338
13339         tp = netdev_priv(dev);
13340         tp->pdev = pdev;
13341         tp->dev = dev;
13342         tp->pm_cap = pm_cap;
13343         tp->rx_mode = TG3_DEF_RX_MODE;
13344         tp->tx_mode = TG3_DEF_TX_MODE;
13345
13346         if (tg3_debug > 0)
13347                 tp->msg_enable = tg3_debug;
13348         else
13349                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13350
13351         /* The word/byte swap controls here control register access byte
13352          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13353          * setting below.
13354          */
13355         tp->misc_host_ctrl =
13356                 MISC_HOST_CTRL_MASK_PCI_INT |
13357                 MISC_HOST_CTRL_WORD_SWAP |
13358                 MISC_HOST_CTRL_INDIR_ACCESS |
13359                 MISC_HOST_CTRL_PCISTATE_RW;
13360
13361         /* The NONFRM (non-frame) byte/word swap controls take effect
13362          * on descriptor entries, anything which isn't packet data.
13363          *
13364          * The StrongARM chips on the board (one for tx, one for rx)
13365          * are running in big-endian mode.
13366          */
13367         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13368                         GRC_MODE_WSWAP_NONFRM_DATA);
13369 #ifdef __BIG_ENDIAN
13370         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13371 #endif
13372         spin_lock_init(&tp->lock);
13373         spin_lock_init(&tp->indirect_lock);
13374         INIT_WORK(&tp->reset_task, tg3_reset_task);
13375
13376         dev->mem_start = pci_resource_start(pdev, BAR_0);
13377         tg3reg_len = pci_resource_len(pdev, BAR_0);
13378         dev->mem_end = dev->mem_start + tg3reg_len;
13379
13380         tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
13381         if (!tp->regs) {
13382                 printk(KERN_ERR PFX "Cannot map device registers, "
13383                        "aborting.\n");
13384                 err = -ENOMEM;
13385                 goto err_out_free_dev;
13386         }
13387
13388         tg3_init_link_config(tp);
13389
13390         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13391         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13392         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13393
13394         dev->open = tg3_open;
13395         dev->stop = tg3_close;
13396         dev->get_stats = tg3_get_stats;
13397         dev->set_multicast_list = tg3_set_rx_mode;
13398         dev->set_mac_address = tg3_set_mac_addr;
13399         dev->do_ioctl = tg3_ioctl;
13400         dev->tx_timeout = tg3_tx_timeout;
13401         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13402         dev->ethtool_ops = &tg3_ethtool_ops;
13403         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13404         dev->change_mtu = tg3_change_mtu;
13405         dev->irq = pdev->irq;
13406 #ifdef CONFIG_NET_POLL_CONTROLLER
13407         dev->poll_controller = tg3_poll_controller;
13408 #endif
13409
13410         err = tg3_get_invariants(tp);
13411         if (err) {
13412                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13413                        "aborting.\n");
13414                 goto err_out_iounmap;
13415         }
13416
13417         /* The EPB bridge inside 5714, 5715, and 5780 and any
13418          * device behind the EPB cannot support DMA addresses > 40-bit.
13419          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13420          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13421          * do DMA address check in tg3_start_xmit().
13422          */
13423         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13424                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13425         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13426                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13427 #ifdef CONFIG_HIGHMEM
13428                 dma_mask = DMA_64BIT_MASK;
13429 #endif
13430         } else
13431                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13432
13433         /* Configure DMA attributes. */
13434         if (dma_mask > DMA_32BIT_MASK) {
13435                 err = pci_set_dma_mask(pdev, dma_mask);
13436                 if (!err) {
13437                         dev->features |= NETIF_F_HIGHDMA;
13438                         err = pci_set_consistent_dma_mask(pdev,
13439                                                           persist_dma_mask);
13440                         if (err < 0) {
13441                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13442                                        "DMA for consistent allocations\n");
13443                                 goto err_out_iounmap;
13444                         }
13445                 }
13446         }
13447         if (err || dma_mask == DMA_32BIT_MASK) {
13448                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13449                 if (err) {
13450                         printk(KERN_ERR PFX "No usable DMA configuration, "
13451                                "aborting.\n");
13452                         goto err_out_iounmap;
13453                 }
13454         }
13455
13456         tg3_init_bufmgr_config(tp);
13457
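        /* Determine TSO capability: chips with hardware TSO are always
         * capable; 5700, 5701, 5705 A0, 5906 and ASF-enabled parts are
         * not; everything else can do firmware TSO but needs the TSO
         * bug workaround flag.
         */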
13458         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13459                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13460         }
13461         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13462             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13463             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13465             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13466                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13467         } else {
13468                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13469         }
13470
13471         /* TSO is on by default on chips that support hardware TSO.
13472          * Firmware TSO on older chips gives lower performance, so it
13473          * is off by default, but can be enabled using ethtool.
13474          */
13475         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13476                 dev->features |= NETIF_F_TSO;
13477                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13478                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13479                         dev->features |= NETIF_F_TSO6;
13480                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13481                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13482                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13483                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13484                         dev->features |= NETIF_F_TSO_ECN;
13485         }
13486
13488         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13489             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13490             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13491                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13492                 tp->rx_pending = 63;
13493         }
13494
13495         err = tg3_get_device_address(tp);
13496         if (err) {
13497                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13498                        "aborting.\n");
13499                 goto err_out_iounmap;
13500         }
13501
13502         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13503                 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
13504                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13505                                "base address for APE, aborting.\n");
13506                         err = -ENODEV;
13507                         goto err_out_iounmap;
13508                 }
13509
13510                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13511                 if (!tp->aperegs) {
13512                         printk(KERN_ERR PFX "Cannot map APE registers, "
13513                                "aborting.\n");
13514                         err = -ENOMEM;
13515                         goto err_out_iounmap;
13516                 }
13517
13518                 tg3_ape_lock_init(tp);
13519         }
13520
13521         /*
13522          * Reset the chip in case the UNDI or EFI driver did not shut it
13523          * down cleanly; the DMA self test will enable WDMAC and we'd see
13524          * (spurious) pending DMA on the PCI bus at that point.
13525          */
13526         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13527             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13528                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13529                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13530         }
13531
13532         err = tg3_test_dma(tp);
13533         if (err) {
13534                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13535                 goto err_out_apeunmap;
13536         }
13537
13538         /* Tigon3 can do ipv4 only... and some chips have buggy
13539          * checksumming.
13540          */
13541         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13542                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13543                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13544                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13545                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13546                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13547                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13548                         dev->features |= NETIF_F_IPV6_CSUM;
13549
13550                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13551         } else
13552                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13553
13554         /* flow control autonegotiation is default behavior */
13555         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13556         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13557
13558         tg3_init_coal(tp);
13559
13560         pci_set_drvdata(pdev, dev);
13561
13562         err = register_netdev(dev);
13563         if (err) {
13564                 printk(KERN_ERR PFX "Cannot register net device, "
13565                        "aborting.\n");
13566                 goto err_out_apeunmap;
13567         }
13568
13569         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13570                dev->name,
13571                tp->board_part_number,
13572                tp->pci_chip_rev_id,
13573                tg3_bus_string(tp, str),
13574                dev->dev_addr);
13575
13576         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13577                 printk(KERN_INFO
13578                        "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13579                        tp->dev->name,
13580                        tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13581                        tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13582         else
13583                 printk(KERN_INFO
13584                        "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13585                        tp->dev->name, tg3_phy_string(tp),
13586                        ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13587                         ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13588                          "10/100/1000Base-T")),
13589                        (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13590
13591         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13592                dev->name,
13593                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13594                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13595                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13596                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13597                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13598         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13599                dev->name, tp->dma_rwctrl,
13600                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13601                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13602
13603         return 0;
13604
13605 err_out_apeunmap:
13606         if (tp->aperegs) {
13607                 iounmap(tp->aperegs);
13608                 tp->aperegs = NULL;
13609         }
13610
13611 err_out_iounmap:
13612         if (tp->regs) {
13613                 iounmap(tp->regs);
13614                 tp->regs = NULL;
13615         }
13616
13617 err_out_free_dev:
13618         free_netdev(dev);
13619
13620 err_out_free_res:
13621         pci_release_regions(pdev);
13622
13623 err_out_disable_pdev:
13624         pci_disable_device(pdev);
13625         pci_set_drvdata(pdev, NULL);
13626         return err;
13627 }
13628
13629 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13630 {
13631         struct net_device *dev = pci_get_drvdata(pdev);
13632
13633         if (dev) {
13634                 struct tg3 *tp = netdev_priv(dev);
13635
13636                 flush_scheduled_work();
13637
13638                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13639                         tg3_phy_fini(tp);
13640                         tg3_mdio_fini(tp);
13641                 }
13642
13643                 unregister_netdev(dev);
13644                 if (tp->aperegs) {
13645                         iounmap(tp->aperegs);
13646                         tp->aperegs = NULL;
13647                 }
13648                 if (tp->regs) {
13649                         iounmap(tp->regs);
13650                         tp->regs = NULL;
13651                 }
13652                 free_netdev(dev);
13653                 pci_release_regions(pdev);
13654                 pci_disable_device(pdev);
13655                 pci_set_drvdata(pdev, NULL);
13656         }
13657 }
13658
13659 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13660 {
13661         struct net_device *dev = pci_get_drvdata(pdev);
13662         struct tg3 *tp = netdev_priv(dev);
13663         pci_power_t target_state;
13664         int err;
13665
13666         /* PCI register 4 needs to be saved whether netif_running() or not.
13667          * MSI address and data need to be saved if using MSI and
13668          * netif_running().
13669          */
13670         pci_save_state(pdev);
13671
13672         if (!netif_running(dev))
13673                 return 0;
13674
13675         flush_scheduled_work();
13676         tg3_phy_stop(tp);
13677         tg3_netif_stop(tp);
13678
13679         del_timer_sync(&tp->timer);
13680
13681         tg3_full_lock(tp, 1);
13682         tg3_disable_ints(tp);
13683         tg3_full_unlock(tp);
13684
13685         netif_device_detach(dev);
13686
13687         tg3_full_lock(tp, 0);
13688         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13689         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13690         tg3_full_unlock(tp);
13691
13692         target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13693
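        /* Drop the device into the target low-power state.  If that
         * fails, restart the hardware and reattach the interface so
         * the device is left usable.
         */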
13694         err = tg3_set_power_state(tp, target_state);
13695         if (err) {
13696                 int err2;
13697
13698                 tg3_full_lock(tp, 0);
13699
13700                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13701                 err2 = tg3_restart_hw(tp, 1);
13702                 if (err2)
13703                         goto out;
13704
13705                 tp->timer.expires = jiffies + tp->timer_offset;
13706                 add_timer(&tp->timer);
13707
13708                 netif_device_attach(dev);
13709                 tg3_netif_start(tp);
13710
13711 out:
13712                 tg3_full_unlock(tp);
13713
13714                 if (!err2)
13715                         tg3_phy_start(tp);
13716         }
13717
13718         return err;
13719 }
13720
13721 static int tg3_resume(struct pci_dev *pdev)
13722 {
13723         struct net_device *dev = pci_get_drvdata(pdev);
13724         struct tg3 *tp = netdev_priv(dev);
13725         int err;
13726
13727         pci_restore_state(tp->pdev);
13728
13729         if (!netif_running(dev))
13730                 return 0;
13731
13732         err = tg3_set_power_state(tp, PCI_D0);
13733         if (err)
13734                 return err;
13735
13736         netif_device_attach(dev);
13737
13738         tg3_full_lock(tp, 0);
13739
13740         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13741         err = tg3_restart_hw(tp, 1);
13742         if (err)
13743                 goto out;
13744
13745         tp->timer.expires = jiffies + tp->timer_offset;
13746         add_timer(&tp->timer);
13747
13748         tg3_netif_start(tp);
13749
13750 out:
13751         tg3_full_unlock(tp);
13752
13753         if (!err)
13754                 tg3_phy_start(tp);
13755
13756         return err;
13757 }
13758
13759 static struct pci_driver tg3_driver = {
13760         .name           = DRV_MODULE_NAME,
13761         .id_table       = tg3_pci_tbl,
13762         .probe          = tg3_init_one,
13763         .remove         = __devexit_p(tg3_remove_one),
13764         .suspend        = tg3_suspend,
13765         .resume         = tg3_resume
13766 };
13767
13768 static int __init tg3_init(void)
13769 {
13770         return pci_register_driver(&tg3_driver);
13771 }
13772
13773 static void __exit tg3_cleanup(void)
13774 {
13775         pci_unregister_driver(&tg3_driver);
13776 }
13777
13778 module_init(tg3_init);
13779 module_exit(tg3_cleanup);