tg3: Enable GPHY APD on select devices
drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.95"
73 #define DRV_MODULE_RELDATE      "November 3, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
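/* NEXT_TX() is the '& (foo - 1)' form referred to above: because
 * TG3_TX_RING_SIZE is a power-of-two constant, the mask gives the same
 * result as ((N) + 1) % TG3_TX_RING_SIZE without a hardware modulo.
 */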
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 #define TG3_RAW_IP_ALIGN 2
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
214         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
215         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
216         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
217         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
218         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
219         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
220         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
221         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
222         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
223         {}
224 };
225
226 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
227
228 static const struct {
229         const char string[ETH_GSTRING_LEN];
230 } ethtool_stats_keys[TG3_NUM_STATS] = {
231         { "rx_octets" },
232         { "rx_fragments" },
233         { "rx_ucast_packets" },
234         { "rx_mcast_packets" },
235         { "rx_bcast_packets" },
236         { "rx_fcs_errors" },
237         { "rx_align_errors" },
238         { "rx_xon_pause_rcvd" },
239         { "rx_xoff_pause_rcvd" },
240         { "rx_mac_ctrl_rcvd" },
241         { "rx_xoff_entered" },
242         { "rx_frame_too_long_errors" },
243         { "rx_jabbers" },
244         { "rx_undersize_packets" },
245         { "rx_in_length_errors" },
246         { "rx_out_length_errors" },
247         { "rx_64_or_less_octet_packets" },
248         { "rx_65_to_127_octet_packets" },
249         { "rx_128_to_255_octet_packets" },
250         { "rx_256_to_511_octet_packets" },
251         { "rx_512_to_1023_octet_packets" },
252         { "rx_1024_to_1522_octet_packets" },
253         { "rx_1523_to_2047_octet_packets" },
254         { "rx_2048_to_4095_octet_packets" },
255         { "rx_4096_to_8191_octet_packets" },
256         { "rx_8192_to_9022_octet_packets" },
257
258         { "tx_octets" },
259         { "tx_collisions" },
260
261         { "tx_xon_sent" },
262         { "tx_xoff_sent" },
263         { "tx_flow_control" },
264         { "tx_mac_errors" },
265         { "tx_single_collisions" },
266         { "tx_mult_collisions" },
267         { "tx_deferred" },
268         { "tx_excessive_collisions" },
269         { "tx_late_collisions" },
270         { "tx_collide_2times" },
271         { "tx_collide_3times" },
272         { "tx_collide_4times" },
273         { "tx_collide_5times" },
274         { "tx_collide_6times" },
275         { "tx_collide_7times" },
276         { "tx_collide_8times" },
277         { "tx_collide_9times" },
278         { "tx_collide_10times" },
279         { "tx_collide_11times" },
280         { "tx_collide_12times" },
281         { "tx_collide_13times" },
282         { "tx_collide_14times" },
283         { "tx_collide_15times" },
284         { "tx_ucast_packets" },
285         { "tx_mcast_packets" },
286         { "tx_bcast_packets" },
287         { "tx_carrier_sense_errors" },
288         { "tx_discards" },
289         { "tx_errors" },
290
291         { "dma_writeq_full" },
292         { "dma_write_prioq_full" },
293         { "rxbds_empty" },
294         { "rx_discards" },
295         { "rx_errors" },
296         { "rx_threshold_hit" },
297
298         { "dma_readq_full" },
299         { "dma_read_prioq_full" },
300         { "tx_comp_queue_full" },
301
302         { "ring_set_send_prod_index" },
303         { "ring_status_update" },
304         { "nic_irqs" },
305         { "nic_avoided_irqs" },
306         { "nic_tx_threshold_hit" }
307 };
308
309 static const struct {
310         const char string[ETH_GSTRING_LEN];
311 } ethtool_test_keys[TG3_NUM_TEST] = {
312         { "nvram test     (online) " },
313         { "link test      (online) " },
314         { "register test  (offline)" },
315         { "memory test    (offline)" },
316         { "loopback test  (offline)" },
317         { "interrupt test (offline)" },
318 };
319
320 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->regs + off);
323 }
324
325 static u32 tg3_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->regs + off));
328 }
329
330 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
331 {
332         writel(val, tp->aperegs + off);
333 }
334
335 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
336 {
337         return (readl(tp->aperegs + off));
338 }
339
340 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         unsigned long flags;
343
344         spin_lock_irqsave(&tp->indirect_lock, flags);
345         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347         spin_unlock_irqrestore(&tp->indirect_lock, flags);
348 }
349
350 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
351 {
352         writel(val, tp->regs + off);
353         readl(tp->regs + off);
354 }
355
356 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
357 {
358         unsigned long flags;
359         u32 val;
360
361         spin_lock_irqsave(&tp->indirect_lock, flags);
362         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
363         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
364         spin_unlock_irqrestore(&tp->indirect_lock, flags);
365         return val;
366 }
367
368 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
369 {
370         unsigned long flags;
371
372         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
373                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
374                                        TG3_64BIT_REG_LOW, val);
375                 return;
376         }
377         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
378                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
379                                        TG3_64BIT_REG_LOW, val);
380                 return;
381         }
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
385         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
386         spin_unlock_irqrestore(&tp->indirect_lock, flags);
387
388         /* In indirect mode when disabling interrupts, we also need
389          * to clear the interrupt bit in the GRC local ctrl register.
390          */
391         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
392             (val == 0x1)) {
393                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
394                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
395         }
396 }
397
398 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
399 {
400         unsigned long flags;
401         u32 val;
402
403         spin_lock_irqsave(&tp->indirect_lock, flags);
404         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
405         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
406         spin_unlock_irqrestore(&tp->indirect_lock, flags);
407         return val;
408 }
409
410 /* usec_wait specifies the wait time in usec when writing to certain registers
411  * where it is unsafe to read back the register without some delay.
412  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
413  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
414  */
415 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
416 {
417         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
418             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
419                 /* Non-posted methods */
420                 tp->write32(tp, off, val);
421         else {
422                 /* Posted method */
423                 tg3_write32(tp, off, val);
424                 if (usec_wait)
425                         udelay(usec_wait);
426                 tp->read32(tp, off);
427         }
428         /* Wait again after the read for the posted method to guarantee that
429          * the wait time is met.
430          */
431         if (usec_wait)
432                 udelay(usec_wait);
433 }
434
435 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
436 {
437         tp->write32_mbox(tp, off, val);
438         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
439             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
440                 tp->read32_mbox(tp, off);
441 }
442
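/* TX mailbox writes need extra care on some chips: parts flagged with
 * TG3_FLAG_TXD_MBOX_HWBUG want the doorbell value written twice, and
 * parts that may reorder mailbox writes need a read-back to flush the
 * posted write.
 */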
443 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
444 {
445         void __iomem *mbox = tp->regs + off;
446         writel(val, mbox);
447         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
448                 writel(val, mbox);
449         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
450                 readl(mbox);
451 }
452
453 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
454 {
455         return (readl(tp->regs + off + GRCMBOX_BASE));
456 }
457
458 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
459 {
460         writel(val, tp->regs + off + GRCMBOX_BASE);
461 }
462
463 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
464 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
465 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
466 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
467 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
468
469 #define tw32(reg,val)           tp->write32(tp, reg, val)
470 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
471 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
472 #define tr32(reg)               tp->read32(tp, reg)
473
474 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
475 {
476         unsigned long flags;
477
478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
479             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
480                 return;
481
482         spin_lock_irqsave(&tp->indirect_lock, flags);
483         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
484                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
485                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
486
487                 /* Always leave this as zero. */
488                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
489         } else {
490                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
491                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
492
493                 /* Always leave this as zero. */
494                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
495         }
496         spin_unlock_irqrestore(&tp->indirect_lock, flags);
497 }
498
499 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
500 {
501         unsigned long flags;
502
503         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
504             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
505                 *val = 0;
506                 return;
507         }
508
509         spin_lock_irqsave(&tp->indirect_lock, flags);
510         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
511                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
512                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
513
514                 /* Always leave this as zero. */
515                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
516         } else {
517                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
518                 *val = tr32(TG3PCI_MEM_WIN_DATA);
519
520                 /* Always leave this as zero. */
521                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
522         }
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 }
525
526 static void tg3_ape_lock_init(struct tg3 *tp)
527 {
528         int i;
529
530         /* Make sure the driver doesn't hold any stale locks. */
531         for (i = 0; i < 8; i++)
532                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
533                                 APE_LOCK_GRANT_DRIVER);
534 }
535
536 static int tg3_ape_lock(struct tg3 *tp, int locknum)
537 {
538         int i, off;
539         int ret = 0;
540         u32 status;
541
542         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
543                 return 0;
544
545         switch (locknum) {
546                 case TG3_APE_LOCK_GRC:
547                 case TG3_APE_LOCK_MEM:
548                         break;
549                 default:
550                         return -EINVAL;
551         }
552
553         off = 4 * locknum;
554
555         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
556
557         /* Wait for up to 1 millisecond to acquire lock. */
558         for (i = 0; i < 100; i++) {
559                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
560                 if (status == APE_LOCK_GRANT_DRIVER)
561                         break;
562                 udelay(10);
563         }
564
565         if (status != APE_LOCK_GRANT_DRIVER) {
566                 /* Revoke the lock request. */
567                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
568                                 APE_LOCK_GRANT_DRIVER);
569
570                 ret = -EBUSY;
571         }
572
573         return ret;
574 }
575
576 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
577 {
578         int off;
579
580         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
581                 return;
582
583         switch (locknum) {
584                 case TG3_APE_LOCK_GRC:
585                 case TG3_APE_LOCK_MEM:
586                         break;
587                 default:
588                         return;
589         }
590
591         off = 4 * locknum;
592         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
593 }
594
595 static void tg3_disable_ints(struct tg3 *tp)
596 {
597         tw32(TG3PCI_MISC_HOST_CTRL,
598              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
599         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
600 }
601
602 static inline void tg3_cond_int(struct tg3 *tp)
603 {
604         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
605             (tp->hw_status->status & SD_STATUS_UPDATED))
606                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
607         else
608                 tw32(HOSTCC_MODE, tp->coalesce_mode |
609                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
610 }
611
612 static void tg3_enable_ints(struct tg3 *tp)
613 {
614         tp->irq_sync = 0;
615         wmb();
616
617         tw32(TG3PCI_MISC_HOST_CTRL,
618              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
619         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
620                        (tp->last_tag << 24));
621         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
622                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
623                                (tp->last_tag << 24));
624         tg3_cond_int(tp);
625 }
626
627 static inline unsigned int tg3_has_work(struct tg3 *tp)
628 {
629         struct tg3_hw_status *sblk = tp->hw_status;
630         unsigned int work_exists = 0;
631
632         /* check for phy events */
633         if (!(tp->tg3_flags &
634               (TG3_FLAG_USE_LINKCHG_REG |
635                TG3_FLAG_POLL_SERDES))) {
636                 if (sblk->status & SD_STATUS_LINK_CHG)
637                         work_exists = 1;
638         }
639         /* check for RX/TX work to do */
640         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
641             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
642                 work_exists = 1;
643
644         return work_exists;
645 }
646
647 /* tg3_restart_ints
648  *  similar to tg3_enable_ints, but it accurately determines whether there
649  *  is new work pending and can return without flushing the PIO write
650  *  which reenables interrupts
651  */
652 static void tg3_restart_ints(struct tg3 *tp)
653 {
654         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
655                      tp->last_tag << 24);
656         mmiowb();
657
658         /* When doing tagged status, this work check is unnecessary.
659          * The last_tag we write above tells the chip which piece of
660          * work we've completed.
661          */
662         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
663             tg3_has_work(tp))
664                 tw32(HOSTCC_MODE, tp->coalesce_mode |
665                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
666 }
667
668 static inline void tg3_netif_stop(struct tg3 *tp)
669 {
670         tp->dev->trans_start = jiffies; /* prevent tx timeout */
671         napi_disable(&tp->napi);
672         netif_tx_disable(tp->dev);
673 }
674
675 static inline void tg3_netif_start(struct tg3 *tp)
676 {
677         netif_wake_queue(tp->dev);
678         /* NOTE: unconditional netif_wake_queue is only appropriate
679          * so long as all callers are assured to have free tx slots
680          * (such as after tg3_init_hw)
681          */
682         napi_enable(&tp->napi);
683         tp->hw_status->status |= SD_STATUS_UPDATED;
684         tg3_enable_ints(tp);
685 }
686
687 static void tg3_switch_clocks(struct tg3 *tp)
688 {
689         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
690         u32 orig_clock_ctrl;
691
692         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
693             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
694                 return;
695
696         orig_clock_ctrl = clock_ctrl;
697         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
698                        CLOCK_CTRL_CLKRUN_OENABLE |
699                        0x1f);
700         tp->pci_clock_ctrl = clock_ctrl;
701
702         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
703                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
704                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
705                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
706                 }
707         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
708                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
709                             clock_ctrl |
710                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
711                             40);
712                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
713                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
714                             40);
715         }
716         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
717 }
718
719 #define PHY_BUSY_LOOPS  5000
720
721 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
722 {
723         u32 frame_val;
724         unsigned int loops;
725         int ret;
726
727         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
728                 tw32_f(MAC_MI_MODE,
729                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
730                 udelay(80);
731         }
732
733         *val = 0x0;
734
735         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
736                       MI_COM_PHY_ADDR_MASK);
737         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
738                       MI_COM_REG_ADDR_MASK);
739         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
740
741         tw32_f(MAC_MI_COM, frame_val);
742
743         loops = PHY_BUSY_LOOPS;
744         while (loops != 0) {
745                 udelay(10);
746                 frame_val = tr32(MAC_MI_COM);
747
748                 if ((frame_val & MI_COM_BUSY) == 0) {
749                         udelay(5);
750                         frame_val = tr32(MAC_MI_COM);
751                         break;
752                 }
753                 loops -= 1;
754         }
755
756         ret = -EBUSY;
757         if (loops != 0) {
758                 *val = frame_val & MI_COM_DATA_MASK;
759                 ret = 0;
760         }
761
762         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
763                 tw32_f(MAC_MI_MODE, tp->mi_mode);
764                 udelay(80);
765         }
766
767         return ret;
768 }
769
770 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
771 {
772         u32 frame_val;
773         unsigned int loops;
774         int ret;
775
776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
777             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
778                 return 0;
779
780         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
781                 tw32_f(MAC_MI_MODE,
782                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
783                 udelay(80);
784         }
785
786         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
787                       MI_COM_PHY_ADDR_MASK);
788         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
789                       MI_COM_REG_ADDR_MASK);
790         frame_val |= (val & MI_COM_DATA_MASK);
791         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
792
793         tw32_f(MAC_MI_COM, frame_val);
794
795         loops = PHY_BUSY_LOOPS;
796         while (loops != 0) {
797                 udelay(10);
798                 frame_val = tr32(MAC_MI_COM);
799                 if ((frame_val & MI_COM_BUSY) == 0) {
800                         udelay(5);
801                         frame_val = tr32(MAC_MI_COM);
802                         break;
803                 }
804                 loops -= 1;
805         }
806
807         ret = -EBUSY;
808         if (loops != 0)
809                 ret = 0;
810
811         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
812                 tw32_f(MAC_MI_MODE, tp->mi_mode);
813                 udelay(80);
814         }
815
816         return ret;
817 }
818
819 static int tg3_bmcr_reset(struct tg3 *tp)
820 {
821         u32 phy_control;
822         int limit, err;
823
824         /* OK, reset it, and poll the BMCR_RESET bit until it
825          * clears or we time out.
826          */
827         phy_control = BMCR_RESET;
828         err = tg3_writephy(tp, MII_BMCR, phy_control);
829         if (err != 0)
830                 return -EBUSY;
831
832         limit = 5000;
833         while (limit--) {
834                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
835                 if (err != 0)
836                         return -EBUSY;
837
838                 if ((phy_control & BMCR_RESET) == 0) {
839                         udelay(40);
840                         break;
841                 }
842                 udelay(10);
843         }
844         if (limit <= 0)
845                 return -EBUSY;
846
847         return 0;
848 }
849
850 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
851 {
852         struct tg3 *tp = (struct tg3 *)bp->priv;
853         u32 val;
854
855         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
856                 return -EAGAIN;
857
858         if (tg3_readphy(tp, reg, &val))
859                 return -EIO;
860
861         return val;
862 }
863
864 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
865 {
866         struct tg3 *tp = (struct tg3 *)bp->priv;
867
868         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
869                 return -EAGAIN;
870
871         if (tg3_writephy(tp, reg, val))
872                 return -EIO;
873
874         return 0;
875 }
876
877 static int tg3_mdio_reset(struct mii_bus *bp)
878 {
879         return 0;
880 }
881
882 static void tg3_mdio_config_5785(struct tg3 *tp)
883 {
884         u32 val;
885         struct phy_device *phydev;
886
887         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
888         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
889         case TG3_PHY_ID_BCM50610:
890                 val = MAC_PHYCFG2_50610_LED_MODES;
891                 break;
892         case TG3_PHY_ID_BCMAC131:
893                 val = MAC_PHYCFG2_AC131_LED_MODES;
894                 break;
895         case TG3_PHY_ID_RTL8211C:
896                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
897                 break;
898         case TG3_PHY_ID_RTL8201E:
899                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
900                 break;
901         default:
902                 return;
903         }
904
905         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
906                 tw32(MAC_PHYCFG2, val);
907
908                 val = tr32(MAC_PHYCFG1);
909                 val &= ~MAC_PHYCFG1_RGMII_INT;
910                 tw32(MAC_PHYCFG1, val);
911
912                 return;
913         }
914
915         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
916                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
917                        MAC_PHYCFG2_FMODE_MASK_MASK |
918                        MAC_PHYCFG2_GMODE_MASK_MASK |
919                        MAC_PHYCFG2_ACT_MASK_MASK   |
920                        MAC_PHYCFG2_QUAL_MASK_MASK |
921                        MAC_PHYCFG2_INBAND_ENABLE;
922
923         tw32(MAC_PHYCFG2, val);
924
925         val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
926                                     MAC_PHYCFG1_RGMII_SND_STAT_EN);
927         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
928                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
929                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
930                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
931                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
932         }
933         tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
934
935         val = tr32(MAC_EXT_RGMII_MODE);
936         val &= ~(MAC_RGMII_MODE_RX_INT_B |
937                  MAC_RGMII_MODE_RX_QUALITY |
938                  MAC_RGMII_MODE_RX_ACTIVITY |
939                  MAC_RGMII_MODE_RX_ENG_DET |
940                  MAC_RGMII_MODE_TX_ENABLE |
941                  MAC_RGMII_MODE_TX_LOWPWR |
942                  MAC_RGMII_MODE_TX_RESET);
943         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
944                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
945                         val |= MAC_RGMII_MODE_RX_INT_B |
946                                MAC_RGMII_MODE_RX_QUALITY |
947                                MAC_RGMII_MODE_RX_ACTIVITY |
948                                MAC_RGMII_MODE_RX_ENG_DET;
949                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
950                         val |= MAC_RGMII_MODE_TX_ENABLE |
951                                MAC_RGMII_MODE_TX_LOWPWR |
952                                MAC_RGMII_MODE_TX_RESET;
953         }
954         tw32(MAC_EXT_RGMII_MODE, val);
955 }
956
957 static void tg3_mdio_start(struct tg3 *tp)
958 {
959         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
960                 mutex_lock(&tp->mdio_bus->mdio_lock);
961                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
962                 mutex_unlock(&tp->mdio_bus->mdio_lock);
963         }
964
965         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
966         tw32_f(MAC_MI_MODE, tp->mi_mode);
967         udelay(80);
968
969         if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
971                 tg3_mdio_config_5785(tp);
972 }
973
974 static void tg3_mdio_stop(struct tg3 *tp)
975 {
976         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
977                 mutex_lock(&tp->mdio_bus->mdio_lock);
978                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
979                 mutex_unlock(&tp->mdio_bus->mdio_lock);
980         }
981 }
982
983 static int tg3_mdio_init(struct tg3 *tp)
984 {
985         int i;
986         u32 reg;
987         struct phy_device *phydev;
988
989         tg3_mdio_start(tp);
990
991         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
992             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
993                 return 0;
994
995         tp->mdio_bus = mdiobus_alloc();
996         if (tp->mdio_bus == NULL)
997                 return -ENOMEM;
998
999         tp->mdio_bus->name     = "tg3 mdio bus";
1000         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1001                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1002         tp->mdio_bus->priv     = tp;
1003         tp->mdio_bus->parent   = &tp->pdev->dev;
1004         tp->mdio_bus->read     = &tg3_mdio_read;
1005         tp->mdio_bus->write    = &tg3_mdio_write;
1006         tp->mdio_bus->reset    = &tg3_mdio_reset;
1007         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1008         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1009
1010         for (i = 0; i < PHY_MAX_ADDR; i++)
1011                 tp->mdio_bus->irq[i] = PHY_POLL;
1012
1013         /* The bus registration will look for all the PHYs on the mdio bus.
1014          * Unfortunately, it does not ensure the PHY is powered up before
1015          * accessing the PHY ID registers.  A chip reset is the
1016          * quickest way to bring the device back to an operational state..
1017          */
1018         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1019                 tg3_bmcr_reset(tp);
1020
1021         i = mdiobus_register(tp->mdio_bus);
1022         if (i) {
1023                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1024                         tp->dev->name, i);
1025                 mdiobus_free(tp->mdio_bus);
1026                 return i;
1027         }
1028
1029         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1030
1031         if (!phydev || !phydev->drv) {
1032                 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1033                 mdiobus_unregister(tp->mdio_bus);
1034                 mdiobus_free(tp->mdio_bus);
1035                 return -ENODEV;
1036         }
1037
1038         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1039         case TG3_PHY_ID_BCM50610:
1040                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1041                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1042                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1043                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1044                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1045                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1046                 /* fallthru */
1047         case TG3_PHY_ID_RTL8211C:
1048                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1049                 break;
1050         case TG3_PHY_ID_RTL8201E:
1051         case TG3_PHY_ID_BCMAC131:
1052                 phydev->interface = PHY_INTERFACE_MODE_MII;
1053                 break;
1054         }
1055
1056         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1057
1058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1059                 tg3_mdio_config_5785(tp);
1060
1061         return 0;
1062 }
1063
1064 static void tg3_mdio_fini(struct tg3 *tp)
1065 {
1066         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1067                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1068                 mdiobus_unregister(tp->mdio_bus);
1069                 mdiobus_free(tp->mdio_bus);
1070                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1071         }
1072 }
1073
1074 /* tp->lock is held. */
1075 static inline void tg3_generate_fw_event(struct tg3 *tp)
1076 {
1077         u32 val;
1078
1079         val = tr32(GRC_RX_CPU_EVENT);
1080         val |= GRC_RX_CPU_DRIVER_EVENT;
1081         tw32_f(GRC_RX_CPU_EVENT, val);
1082
1083         tp->last_event_jiffies = jiffies;
1084 }
1085
1086 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1087
1088 /* tp->lock is held. */
1089 static void tg3_wait_for_event_ack(struct tg3 *tp)
1090 {
1091         int i;
1092         unsigned int delay_cnt;
1093         long time_remain;
1094
1095         /* If enough time has passed, no wait is necessary. */
1096         time_remain = (long)(tp->last_event_jiffies + 1 +
1097                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1098                       (long)jiffies;
1099         if (time_remain < 0)
1100                 return;
1101
1102         /* Check if we can shorten the wait time. */
1103         delay_cnt = jiffies_to_usecs(time_remain);
1104         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1105                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1106         delay_cnt = (delay_cnt >> 3) + 1;
1107
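        /* Poll every 8 usec; delay_cnt above converts the remaining time
         * into the number of polls ('>> 3' approximates '/ 8', '+ 1'
         * rounds up).
         */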
1108         for (i = 0; i < delay_cnt; i++) {
1109                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1110                         break;
1111                 udelay(8);
1112         }
1113 }
1114
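/* Report the current link state to the management (ASF/UMP) firmware by
 * copying the relevant MII registers into the firmware mailbox and then
 * raising a driver event.
 */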
1115 /* tp->lock is held. */
1116 static void tg3_ump_link_report(struct tg3 *tp)
1117 {
1118         u32 reg;
1119         u32 val;
1120
1121         if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1122             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
1123                 return;
1124
1125         tg3_wait_for_event_ack(tp);
1126
1127         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1128
1129         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1130
1131         val = 0;
1132         if (!tg3_readphy(tp, MII_BMCR, &reg))
1133                 val = reg << 16;
1134         if (!tg3_readphy(tp, MII_BMSR, &reg))
1135                 val |= (reg & 0xffff);
1136         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1137
1138         val = 0;
1139         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1140                 val = reg << 16;
1141         if (!tg3_readphy(tp, MII_LPA, &reg))
1142                 val |= (reg & 0xffff);
1143         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1144
1145         val = 0;
1146         if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1147                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1148                         val = reg << 16;
1149                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1150                         val |= (reg & 0xffff);
1151         }
1152         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1153
1154         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1155                 val = reg << 16;
1156         else
1157                 val = 0;
1158         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1159
1160         tg3_generate_fw_event(tp);
1161 }
1162
1163 static void tg3_link_report(struct tg3 *tp)
1164 {
1165         if (!netif_carrier_ok(tp->dev)) {
1166                 if (netif_msg_link(tp))
1167                         printk(KERN_INFO PFX "%s: Link is down.\n",
1168                                tp->dev->name);
1169                 tg3_ump_link_report(tp);
1170         } else if (netif_msg_link(tp)) {
1171                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1172                        tp->dev->name,
1173                        (tp->link_config.active_speed == SPEED_1000 ?
1174                         1000 :
1175                         (tp->link_config.active_speed == SPEED_100 ?
1176                          100 : 10)),
1177                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1178                         "full" : "half"));
1179
1180                 printk(KERN_INFO PFX
1181                        "%s: Flow control is %s for TX and %s for RX.\n",
1182                        tp->dev->name,
1183                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1184                        "on" : "off",
1185                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1186                        "on" : "off");
1187                 tg3_ump_link_report(tp);
1188         }
1189 }
1190
1191 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1192 {
1193         u16 miireg;
1194
1195         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1196                 miireg = ADVERTISE_PAUSE_CAP;
1197         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1198                 miireg = ADVERTISE_PAUSE_ASYM;
1199         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1200                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1201         else
1202                 miireg = 0;
1203
1204         return miireg;
1205 }
1206
1207 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1208 {
1209         u16 miireg;
1210
1211         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1212                 miireg = ADVERTISE_1000XPAUSE;
1213         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1214                 miireg = ADVERTISE_1000XPSE_ASYM;
1215         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1216                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1217         else
1218                 miireg = 0;
1219
1220         return miireg;
1221 }
1222
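/* Resolve the negotiated pause configuration from the local and
 * link-partner advertisement bits, following the usual IEEE 802.3
 * symmetric/asymmetric pause resolution rules.
 */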
1223 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1224 {
1225         u8 cap = 0;
1226
1227         if (lcladv & ADVERTISE_PAUSE_CAP) {
1228                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1229                         if (rmtadv & LPA_PAUSE_CAP)
1230                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1231                         else if (rmtadv & LPA_PAUSE_ASYM)
1232                                 cap = TG3_FLOW_CTRL_RX;
1233                 } else {
1234                         if (rmtadv & LPA_PAUSE_CAP)
1235                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1236                 }
1237         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1238                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1239                         cap = TG3_FLOW_CTRL_TX;
1240         }
1241
1242         return cap;
1243 }
1244
1245 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1246 {
1247         u8 cap = 0;
1248
1249         if (lcladv & ADVERTISE_1000XPAUSE) {
1250                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1251                         if (rmtadv & LPA_1000XPAUSE)
1252                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1253                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1254                                 cap = TG3_FLOW_CTRL_RX;
1255                 } else {
1256                         if (rmtadv & LPA_1000XPAUSE)
1257                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1258                 }
1259         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1260                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1261                         cap = TG3_FLOW_CTRL_TX;
1262         }
1263
1264         return cap;
1265 }
1266
1267 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1268 {
1269         u8 autoneg;
1270         u8 flowctrl = 0;
1271         u32 old_rx_mode = tp->rx_mode;
1272         u32 old_tx_mode = tp->tx_mode;
1273
1274         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1275                 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1276         else
1277                 autoneg = tp->link_config.autoneg;
1278
1279         if (autoneg == AUTONEG_ENABLE &&
1280             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1281                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1282                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1283                 else
1284                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1285         } else
1286                 flowctrl = tp->link_config.flowctrl;
1287
1288         tp->link_config.active_flowctrl = flowctrl;
1289
1290         if (flowctrl & TG3_FLOW_CTRL_RX)
1291                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1292         else
1293                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1294
1295         if (old_rx_mode != tp->rx_mode)
1296                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1297
1298         if (flowctrl & TG3_FLOW_CTRL_TX)
1299                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1300         else
1301                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1302
1303         if (old_tx_mode != tp->tx_mode)
1304                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1305 }
1306
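/* phylib link-change callback (registered via phy_connect() below):
 * mirror the PHY's negotiated speed, duplex and pause settings into the
 * MAC mode and flow-control registers, and log a message when the link
 * state changes.
 */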
1307 static void tg3_adjust_link(struct net_device *dev)
1308 {
1309         u8 oldflowctrl, linkmesg = 0;
1310         u32 mac_mode, lcl_adv, rmt_adv;
1311         struct tg3 *tp = netdev_priv(dev);
1312         struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1313
1314         spin_lock(&tp->lock);
1315
1316         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1317                                     MAC_MODE_HALF_DUPLEX);
1318
1319         oldflowctrl = tp->link_config.active_flowctrl;
1320
1321         if (phydev->link) {
1322                 lcl_adv = 0;
1323                 rmt_adv = 0;
1324
1325                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1326                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1327                 else
1328                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1329
1330                 if (phydev->duplex == DUPLEX_HALF)
1331                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1332                 else {
1333                         lcl_adv = tg3_advert_flowctrl_1000T(
1334                                   tp->link_config.flowctrl);
1335
1336                         if (phydev->pause)
1337                                 rmt_adv = LPA_PAUSE_CAP;
1338                         if (phydev->asym_pause)
1339                                 rmt_adv |= LPA_PAUSE_ASYM;
1340                 }
1341
1342                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1343         } else
1344                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1345
1346         if (mac_mode != tp->mac_mode) {
1347                 tp->mac_mode = mac_mode;
1348                 tw32_f(MAC_MODE, tp->mac_mode);
1349                 udelay(40);
1350         }
1351
1352         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1353                 if (phydev->speed == SPEED_10)
1354                         tw32(MAC_MI_STAT,
1355                              MAC_MI_STAT_10MBPS_MODE |
1356                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1357                 else
1358                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1359         }
1360
1361         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1362                 tw32(MAC_TX_LENGTHS,
1363                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1364                       (6 << TX_LENGTHS_IPG_SHIFT) |
1365                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1366         else
1367                 tw32(MAC_TX_LENGTHS,
1368                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1369                       (6 << TX_LENGTHS_IPG_SHIFT) |
1370                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1371
1372         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1373             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1374             phydev->speed != tp->link_config.active_speed ||
1375             phydev->duplex != tp->link_config.active_duplex ||
1376             oldflowctrl != tp->link_config.active_flowctrl)
1377             linkmesg = 1;
1378
1379         tp->link_config.active_speed = phydev->speed;
1380         tp->link_config.active_duplex = phydev->duplex;
1381
1382         spin_unlock(&tp->lock);
1383
1384         if (linkmesg)
1385                 tg3_link_report(tp);
1386 }
1387
1388 static int tg3_phy_init(struct tg3 *tp)
1389 {
1390         struct phy_device *phydev;
1391
1392         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1393                 return 0;
1394
1395         /* Bring the PHY back to a known state. */
1396         tg3_bmcr_reset(tp);
1397
1398         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1399
1400         /* Attach the MAC to the PHY. */
1401         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1402                              phydev->dev_flags, phydev->interface);
1403         if (IS_ERR(phydev)) {
1404                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1405                 return PTR_ERR(phydev);
1406         }
1407
1408         /* Mask with MAC supported features. */
1409         switch (phydev->interface) {
1410         case PHY_INTERFACE_MODE_GMII:
1411         case PHY_INTERFACE_MODE_RGMII:
1412                 phydev->supported &= (PHY_GBIT_FEATURES |
1413                                       SUPPORTED_Pause |
1414                                       SUPPORTED_Asym_Pause);
1415                 break;
1416         case PHY_INTERFACE_MODE_MII:
1417                 phydev->supported &= (PHY_BASIC_FEATURES |
1418                                       SUPPORTED_Pause |
1419                                       SUPPORTED_Asym_Pause);
1420                 break;
1421         default:
1422                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1423                 return -EINVAL;
1424         }
1425
1426         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1427
1428         phydev->advertising = phydev->supported;
1429
1430         return 0;
1431 }
1432
1433 static void tg3_phy_start(struct tg3 *tp)
1434 {
1435         struct phy_device *phydev;
1436
1437         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1438                 return;
1439
1440         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1441
1442         if (tp->link_config.phy_is_low_power) {
1443                 tp->link_config.phy_is_low_power = 0;
1444                 phydev->speed = tp->link_config.orig_speed;
1445                 phydev->duplex = tp->link_config.orig_duplex;
1446                 phydev->autoneg = tp->link_config.orig_autoneg;
1447                 phydev->advertising = tp->link_config.orig_advertising;
1448         }
1449
1450         phy_start(phydev);
1451
1452         phy_start_aneg(phydev);
1453 }
1454
1455 static void tg3_phy_stop(struct tg3 *tp)
1456 {
1457         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1458                 return;
1459
1460         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1461 }
1462
1463 static void tg3_phy_fini(struct tg3 *tp)
1464 {
1465         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1466                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1467                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1468         }
1469 }
1470
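/* Write @val to the DSP register selected by @reg, using the DSP
 * address/data port pair exposed through the MII registers.
 */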
1471 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1472 {
1473         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1474         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1475 }
1476
1477 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1478 {
1479         u32 reg;
1480
1481         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1482                 return;
1483
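        /* Two shadow-register writes follow: the first programs the
         * SCR5 power/DLL bits, the second programs the APD control
         * word with an 84ms wake timer, setting the enable bit only
         * when APD is requested.
         */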
1484         reg = MII_TG3_MISC_SHDW_WREN |
1485               MII_TG3_MISC_SHDW_SCR5_SEL |
1486               MII_TG3_MISC_SHDW_SCR5_LPED |
1487               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1488               MII_TG3_MISC_SHDW_SCR5_SDTL |
1489               MII_TG3_MISC_SHDW_SCR5_C125OE;
1490         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1491                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1492
1493         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1494
1495
1496         reg = MII_TG3_MISC_SHDW_WREN |
1497               MII_TG3_MISC_SHDW_APD_SEL |
1498               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1499         if (enable)
1500                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1501
1502         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1503 }
1504
1505 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1506 {
1507         u32 phy;
1508
1509         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1510             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1511                 return;
1512
1513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1514                 u32 ephy;
1515
1516                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1517                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1518                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1519                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1520                                 if (enable)
1521                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1522                                 else
1523                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1524                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1525                         }
1526                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1527                 }
1528         } else {
1529                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1530                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1531                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1532                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1533                         if (enable)
1534                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1535                         else
1536                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1537                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1538                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1539                 }
1540         }
1541 }
1542
1543 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1544 {
1545         u32 val;
1546
1547         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1548                 return;
1549
1550         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1551             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1552                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1553                              (val | (1 << 15) | (1 << 4)));
1554 }
1555
1556 static void tg3_phy_apply_otp(struct tg3 *tp)
1557 {
1558         u32 otp, phy;
1559
1560         if (!tp->phy_otp)
1561                 return;
1562
1563         otp = tp->phy_otp;
1564
1565         /* Enable SM_DSP clock and tx 6dB coding. */
1566         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1567               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1568               MII_TG3_AUXCTL_ACTL_TX_6DB;
1569         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1570
1571         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1572         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1573         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1574
1575         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1576               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1577         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1578
1579         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1580         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1581         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1582
1583         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1584         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1585
1586         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1587         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1588
1589         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1590               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1591         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1592
1593         /* Turn off SM_DSP clock. */
1594         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1595               MII_TG3_AUXCTL_ACTL_TX_6DB;
1596         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1597 }
1598
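/* Poll PHY register 0x16 until bit 12 clears (presumably the DSP
 * macro-busy flag, judging by the callers); give up after 100 reads.
 */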
1599 static int tg3_wait_macro_done(struct tg3 *tp)
1600 {
1601         int limit = 100;
1602
1603         while (limit--) {
1604                 u32 tmp32;
1605
1606                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1607                         if ((tmp32 & 0x1000) == 0)
1608                                 break;
1609                 }
1610         }
1611         if (limit < 0)
1612                 return -EBUSY;
1613
1614         return 0;
1615 }
1616
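/* Write a six-word test pattern into each of the four DSP channels
 * and read it back.  A macro-done timeout sets *resetp so the caller
 * will reset the PHY and retry; a readback mismatch fails with -EBUSY.
 */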
1617 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1618 {
1619         static const u32 test_pat[4][6] = {
1620         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1621         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1622         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1623         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1624         };
1625         int chan;
1626
1627         for (chan = 0; chan < 4; chan++) {
1628                 int i;
1629
1630                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1631                              (chan * 0x2000) | 0x0200);
1632                 tg3_writephy(tp, 0x16, 0x0002);
1633
1634                 for (i = 0; i < 6; i++)
1635                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1636                                      test_pat[chan][i]);
1637
1638                 tg3_writephy(tp, 0x16, 0x0202);
1639                 if (tg3_wait_macro_done(tp)) {
1640                         *resetp = 1;
1641                         return -EBUSY;
1642                 }
1643
1644                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1645                              (chan * 0x2000) | 0x0200);
1646                 tg3_writephy(tp, 0x16, 0x0082);
1647                 if (tg3_wait_macro_done(tp)) {
1648                         *resetp = 1;
1649                         return -EBUSY;
1650                 }
1651
1652                 tg3_writephy(tp, 0x16, 0x0802);
1653                 if (tg3_wait_macro_done(tp)) {
1654                         *resetp = 1;
1655                         return -EBUSY;
1656                 }
1657
1658                 for (i = 0; i < 6; i += 2) {
1659                         u32 low, high;
1660
1661                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1662                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1663                             tg3_wait_macro_done(tp)) {
1664                                 *resetp = 1;
1665                                 return -EBUSY;
1666                         }
1667                         low &= 0x7fff;
1668                         high &= 0x000f;
1669                         if (low != test_pat[chan][i] ||
1670                             high != test_pat[chan][i+1]) {
1671                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1672                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1673                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1674
1675                                 return -EBUSY;
1676                         }
1677                 }
1678         }
1679
1680         return 0;
1681 }
1682
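/* Clear the test pattern locations in all four DSP channels. */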
1683 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1684 {
1685         int chan;
1686
1687         for (chan = 0; chan < 4; chan++) {
1688                 int i;
1689
1690                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1691                              (chan * 0x2000) | 0x0200);
1692                 tg3_writephy(tp, 0x16, 0x0002);
1693                 for (i = 0; i < 6; i++)
1694                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1695                 tg3_writephy(tp, 0x16, 0x0202);
1696                 if (tg3_wait_macro_done(tp))
1697                         return -EBUSY;
1698         }
1699
1700         return 0;
1701 }
1702
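/* DSP fixup for 5703/5704/5705 PHYs: reset the PHY, force 1000/full
 * master mode, and rewrite the DSP test patterns until they verify
 * (up to 10 attempts), then restore the original CTRL and EXT_CTRL
 * register values.
 */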
1703 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1704 {
1705         u32 reg32, phy9_orig;
1706         int retries, do_phy_reset, err;
1707
1708         retries = 10;
1709         do_phy_reset = 1;
1710         do {
1711                 if (do_phy_reset) {
1712                         err = tg3_bmcr_reset(tp);
1713                         if (err)
1714                                 return err;
1715                         do_phy_reset = 0;
1716                 }
1717
1718                 /* Disable transmitter and interrupt.  */
1719                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1720                         continue;
1721
1722                 reg32 |= 0x3000;
1723                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1724
1725                 /* Set full-duplex, 1000 mbps.  */
1726                 tg3_writephy(tp, MII_BMCR,
1727                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1728
1729                 /* Set to master mode.  */
1730                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1731                         continue;
1732
1733                 tg3_writephy(tp, MII_TG3_CTRL,
1734                              (MII_TG3_CTRL_AS_MASTER |
1735                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1736
1737                 /* Enable SM_DSP_CLOCK and 6dB.  */
1738                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1739
1740                 /* Block the PHY control access.  */
1741                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1742                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1743
1744                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1745                 if (!err)
1746                         break;
1747         } while (--retries);
1748
1749         err = tg3_phy_reset_chanpat(tp);
1750         if (err)
1751                 return err;
1752
1753         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1754         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1755
1756         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1757         tg3_writephy(tp, 0x16, 0x0000);
1758
1759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1760             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1761                 /* Set Extended packet length bit for jumbo frames */
1762                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1763         }
1764         else {
1765                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1766         }
1767
1768         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1769
1770         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1771                 reg32 &= ~0x3000;
1772                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1773         } else if (!err)
1774                 err = -EBUSY;
1775
1776         return err;
1777 }
1778
1779 /* This will reset the tigon3 PHY if there is no valid
1780  * link unless the FORCE argument is non-zero.
1781  */
1782 static int tg3_phy_reset(struct tg3 *tp)
1783 {
1784         u32 cpmuctrl;
1785         u32 phy_status;
1786         int err;
1787
1788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1789                 u32 val;
1790
1791                 val = tr32(GRC_MISC_CFG);
1792                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1793                 udelay(40);
1794         }
1795         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1796         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1797         if (err != 0)
1798                 return -EBUSY;
1799
1800         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1801                 netif_carrier_off(tp->dev);
1802                 tg3_link_report(tp);
1803         }
1804
1805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1806             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1807             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1808                 err = tg3_phy_reset_5703_4_5(tp);
1809                 if (err)
1810                         return err;
1811                 goto out;
1812         }
1813
1814         cpmuctrl = 0;
1815         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1816             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1817                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1818                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1819                         tw32(TG3_CPMU_CTRL,
1820                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1821         }
1822
1823         err = tg3_bmcr_reset(tp);
1824         if (err)
1825                 return err;
1826
1827         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1828                 u32 phy;
1829
1830                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1831                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1832
1833                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1834         }
1835
1836         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1837             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1838                 u32 val;
1839
1840                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1841                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1842                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1843                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1844                         udelay(40);
1845                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1846                 }
1847         }
1848
1849         tg3_phy_apply_otp(tp);
1850
1851         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1852                 tg3_phy_toggle_apd(tp, true);
1853         else
1854                 tg3_phy_toggle_apd(tp, false);
1855
1856 out:
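        /* Revision-specific DSP workarounds (ADC, 5704 A0, BER and
         * jitter bugs) are applied below before restoring the jumbo
         * frame bits, auto-MDIX and wirespeed settings.
         */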
1857         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1858                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1859                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1860                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1861                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1862                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1864         }
1865         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1866                 tg3_writephy(tp, 0x1c, 0x8d68);
1867                 tg3_writephy(tp, 0x1c, 0x8d68);
1868         }
1869         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1870                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1871                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1872                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1873                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1874                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1875                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1876                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1877                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1878         }
1879         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1880                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1881                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1882                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1883                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1884                         tg3_writephy(tp, MII_TG3_TEST1,
1885                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1886                 } else
1887                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1888                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1889         }
1890         /* Set Extended packet length bit (bit 14) on all chips that */
1891         /* support jumbo frames */
1892         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1893                 /* Cannot do read-modify-write on 5401 */
1894                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1895         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1896                 u32 phy_reg;
1897
1898                 /* Set bit 14 with read-modify-write to preserve other bits */
1899                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1900                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1901                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1902         }
1903
1904         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1905          * jumbo frames transmission.
1906          */
1907         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1908                 u32 phy_reg;
1909
1910                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1911                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1912                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1913         }
1914
1915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1916                 /* adjust output voltage */
1917                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1918         }
1919
1920         tg3_phy_toggle_automdix(tp, 1);
1921         tg3_phy_set_wirespeed(tp);
1922         return 0;
1923 }
1924
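/* Drive the GRC local-control GPIOs that switch auxiliary (Vaux)
 * power, coordinating with the peer function on dual-port 5704/5714
 * boards so that power stays up while either port needs WOL or ASF.
 */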
1925 static void tg3_frob_aux_power(struct tg3 *tp)
1926 {
1927         struct tg3 *tp_peer = tp;
1928
1929         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1930                 return;
1931
1932         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1933             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1934                 struct net_device *dev_peer;
1935
1936                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1937                 /* remove_one() may have been run on the peer. */
1938                 if (!dev_peer)
1939                         tp_peer = tp;
1940                 else
1941                         tp_peer = netdev_priv(dev_peer);
1942         }
1943
1944         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1945             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1946             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1947             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1948                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1949                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1950                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1951                                     (GRC_LCLCTRL_GPIO_OE0 |
1952                                      GRC_LCLCTRL_GPIO_OE1 |
1953                                      GRC_LCLCTRL_GPIO_OE2 |
1954                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1955                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1956                                     100);
1957                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1958                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1959                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1960                                              GRC_LCLCTRL_GPIO_OE1 |
1961                                              GRC_LCLCTRL_GPIO_OE2 |
1962                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1963                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1964                                              tp->grc_local_ctrl;
1965                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1966
1967                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1968                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1969
1970                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1971                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1972                 } else {
1973                         u32 no_gpio2;
1974                         u32 grc_local_ctrl = 0;
1975
1976                         if (tp_peer != tp &&
1977                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1978                                 return;
1979
1980                         /* Workaround to prevent overdrawing Amps. */
1981                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1982                             ASIC_REV_5714) {
1983                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1984                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1985                                             grc_local_ctrl, 100);
1986                         }
1987
1988                         /* On 5753 and variants, GPIO2 cannot be used. */
1989                         no_gpio2 = tp->nic_sram_data_cfg &
1990                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1991
1992                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1993                                          GRC_LCLCTRL_GPIO_OE1 |
1994                                          GRC_LCLCTRL_GPIO_OE2 |
1995                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1996                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1997                         if (no_gpio2) {
1998                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1999                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2000                         }
2001                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2002                                                     grc_local_ctrl, 100);
2003
2004                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2005
2006                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2007                                                     grc_local_ctrl, 100);
2008
2009                         if (!no_gpio2) {
2010                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2011                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2012                                             grc_local_ctrl, 100);
2013                         }
2014                 }
2015         } else {
2016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2017                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2018                         if (tp_peer != tp &&
2019                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2020                                 return;
2021
2022                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2023                                     (GRC_LCLCTRL_GPIO_OE1 |
2024                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2025
2026                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2027                                     GRC_LCLCTRL_GPIO_OE1, 100);
2028
2029                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2030                                     (GRC_LCLCTRL_GPIO_OE1 |
2031                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2032                 }
2033         }
2034 }
2035
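/* Decide whether the MAC's LINK_POLARITY bit should be set for the
 * given speed; the answer depends on the LED mode and on whether the
 * external PHY is a 5411.
 */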
2036 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2037 {
2038         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2039                 return 1;
2040         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2041                 if (speed != SPEED_10)
2042                         return 1;
2043         } else if (speed == SPEED_10)
2044                 return 1;
2045
2046         return 0;
2047 }
2048
2049 static int tg3_setup_phy(struct tg3 *, int);
2050
2051 #define RESET_KIND_SHUTDOWN     0
2052 #define RESET_KIND_INIT         1
2053 #define RESET_KIND_SUSPEND      2
2054
2055 static void tg3_write_sig_post_reset(struct tg3 *, int);
2056 static int tg3_halt_cpu(struct tg3 *, u32);
2057 static int tg3_nvram_lock(struct tg3 *);
2058 static void tg3_nvram_unlock(struct tg3 *);
2059
2060 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2061 {
2062         u32 val;
2063
2064         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2066                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2067                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2068
2069                         sg_dig_ctrl |=
2070                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2071                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2072                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2073                 }
2074                 return;
2075         }
2076
2077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2078                 tg3_bmcr_reset(tp);
2079                 val = tr32(GRC_MISC_CFG);
2080                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2081                 udelay(40);
2082                 return;
2083         } else if (do_low_power) {
2084                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2085                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2086
2087                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2088                              MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2089                              MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2090                              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2091                              MII_TG3_AUXCTL_PCTL_VREG_11V);
2092         }
2093
2094         /* The PHY should not be powered down on some chips because
2095          * of bugs.
2096          */
2097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2098             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2099             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2100              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2101                 return;
2102
2103         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2104             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2105                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2106                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2107                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2108                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2109         }
2110
2111         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2112 }
2113
2114 /* tp->lock is held. */
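/* Program the station address into the MAC address slots (all four,
 * or all but slot 1 when skip_mac_1 is set), mirror it into the
 * extended slots on 5703/5704, and seed the TX backoff register with
 * the byte sum of the address.
 */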
2115 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2116 {
2117         u32 addr_high, addr_low;
2118         int i;
2119
2120         addr_high = ((tp->dev->dev_addr[0] << 8) |
2121                      tp->dev->dev_addr[1]);
2122         addr_low = ((tp->dev->dev_addr[2] << 24) |
2123                     (tp->dev->dev_addr[3] << 16) |
2124                     (tp->dev->dev_addr[4] <<  8) |
2125                     (tp->dev->dev_addr[5] <<  0));
2126         for (i = 0; i < 4; i++) {
2127                 if (i == 1 && skip_mac_1)
2128                         continue;
2129                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2130                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2131         }
2132
2133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2134             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2135                 for (i = 0; i < 12; i++) {
2136                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2137                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2138                 }
2139         }
2140
2141         addr_high = (tp->dev->dev_addr[0] +
2142                      tp->dev->dev_addr[1] +
2143                      tp->dev->dev_addr[2] +
2144                      tp->dev->dev_addr[3] +
2145                      tp->dev->dev_addr[4] +
2146                      tp->dev->dev_addr[5]) &
2147                 TX_BACKOFF_SEED_MASK;
2148         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2149 }
2150
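/* Move the device to the requested PCI power state.  For D1-D3hot
 * this saves the current link settings, re-advertises a WOL-friendly
 * speed, arms magic-packet/WOL mode in the MAC, gates the core clocks
 * where the chip allows it, and finally calls pci_set_power_state().
 */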
2151 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2152 {
2153         u32 misc_host_ctrl;
2154         bool device_should_wake, do_low_power;
2155
2156         /* Make sure register accesses (indirect or otherwise)
2157          * will function correctly.
2158          */
2159         pci_write_config_dword(tp->pdev,
2160                                TG3PCI_MISC_HOST_CTRL,
2161                                tp->misc_host_ctrl);
2162
2163         switch (state) {
2164         case PCI_D0:
2165                 pci_enable_wake(tp->pdev, state, false);
2166                 pci_set_power_state(tp->pdev, PCI_D0);
2167
2168                 /* Switch out of Vaux if it is a NIC */
2169                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2170                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2171
2172                 return 0;
2173
2174         case PCI_D1:
2175         case PCI_D2:
2176         case PCI_D3hot:
2177                 break;
2178
2179         default:
2180                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2181                         tp->dev->name, state);
2182                 return -EINVAL;
2183         }
2184
2185         /* Restore the CLKREQ setting. */
2186         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2187                 u16 lnkctl;
2188
2189                 pci_read_config_word(tp->pdev,
2190                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2191                                      &lnkctl);
2192                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2193                 pci_write_config_word(tp->pdev,
2194                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2195                                       lnkctl);
2196         }
2197
2198         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2199         tw32(TG3PCI_MISC_HOST_CTRL,
2200              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2201
2202         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2203                              device_may_wakeup(&tp->pdev->dev) &&
2204                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2205
2206         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2207                 do_low_power = false;
2208                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2209                     !tp->link_config.phy_is_low_power) {
2210                         struct phy_device *phydev;
2211                         u32 phyid, advertising;
2212
2213                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2214
2215                         tp->link_config.phy_is_low_power = 1;
2216
2217                         tp->link_config.orig_speed = phydev->speed;
2218                         tp->link_config.orig_duplex = phydev->duplex;
2219                         tp->link_config.orig_autoneg = phydev->autoneg;
2220                         tp->link_config.orig_advertising = phydev->advertising;
2221
2222                         advertising = ADVERTISED_TP |
2223                                       ADVERTISED_Pause |
2224                                       ADVERTISED_Autoneg |
2225                                       ADVERTISED_10baseT_Half;
2226
2227                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2228                             device_should_wake) {
2229                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2230                                         advertising |=
2231                                                 ADVERTISED_100baseT_Half |
2232                                                 ADVERTISED_100baseT_Full |
2233                                                 ADVERTISED_10baseT_Full;
2234                                 else
2235                                         advertising |= ADVERTISED_10baseT_Full;
2236                         }
2237
2238                         phydev->advertising = advertising;
2239
2240                         phy_start_aneg(phydev);
2241
2242                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2243                         if (phyid != TG3_PHY_ID_BCMAC131) {
2244                                 phyid &= TG3_PHY_OUI_MASK;
2245                                 if (phyid == TG3_PHY_OUI_1 ||
2246                                     phyid == TG3_PHY_OUI_2 ||
2247                                     phyid == TG3_PHY_OUI_3)
2248                                         do_low_power = true;
2249                         }
2250                 }
2251         } else {
2252                 do_low_power = false;
2253
2254                 if (tp->link_config.phy_is_low_power == 0) {
2255                         tp->link_config.phy_is_low_power = 1;
2256                         tp->link_config.orig_speed = tp->link_config.speed;
2257                         tp->link_config.orig_duplex = tp->link_config.duplex;
2258                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2259                 }
2260
2261                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2262                         tp->link_config.speed = SPEED_10;
2263                         tp->link_config.duplex = DUPLEX_HALF;
2264                         tp->link_config.autoneg = AUTONEG_ENABLE;
2265                         tg3_setup_phy(tp, 0);
2266                 }
2267         }
2268
2269         __tg3_set_mac_addr(tp, 0);
2270
2271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2272                 u32 val;
2273
2274                 val = tr32(GRC_VCPU_EXT_CTRL);
2275                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2276         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2277                 int i;
2278                 u32 val;
2279
2280                 for (i = 0; i < 200; i++) {
2281                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2282                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2283                                 break;
2284                         msleep(1);
2285                 }
2286         }
2287         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2288                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2289                                                      WOL_DRV_STATE_SHUTDOWN |
2290                                                      WOL_DRV_WOL |
2291                                                      WOL_SET_MAGIC_PKT);
2292
2293         if (device_should_wake) {
2294                 u32 mac_mode;
2295
2296                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2297                         if (do_low_power) {
2298                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2299                                 udelay(40);
2300                         }
2301
2302                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2303                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2304                         else
2305                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2306
2307                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2308                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2309                             ASIC_REV_5700) {
2310                                 u32 speed = (tp->tg3_flags &
2311                                              TG3_FLAG_WOL_SPEED_100MB) ?
2312                                              SPEED_100 : SPEED_10;
2313                                 if (tg3_5700_link_polarity(tp, speed))
2314                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2315                                 else
2316                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2317                         }
2318                 } else {
2319                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2320                 }
2321
2322                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2323                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2324
2325                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2326                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2327                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2328                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2329                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2330                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2331
2332                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2333                         mac_mode |= tp->mac_mode &
2334                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2335                         if (mac_mode & MAC_MODE_APE_TX_EN)
2336                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2337                 }
2338
2339                 tw32_f(MAC_MODE, mac_mode);
2340                 udelay(100);
2341
2342                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2343                 udelay(10);
2344         }
2345
2346         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2347             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2348              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2349                 u32 base_val;
2350
2351                 base_val = tp->pci_clock_ctrl;
2352                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2353                              CLOCK_CTRL_TXCLK_DISABLE);
2354
2355                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2356                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2357         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2358                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2359                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2360                 /* do nothing */
2361         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2362                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2363                 u32 newbits1, newbits2;
2364
2365                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2366                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2367                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2368                                     CLOCK_CTRL_TXCLK_DISABLE |
2369                                     CLOCK_CTRL_ALTCLK);
2370                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2371                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2372                         newbits1 = CLOCK_CTRL_625_CORE;
2373                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2374                 } else {
2375                         newbits1 = CLOCK_CTRL_ALTCLK;
2376                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2377                 }
2378
2379                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2380                             40);
2381
2382                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2383                             40);
2384
2385                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2386                         u32 newbits3;
2387
2388                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2389                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2390                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2391                                             CLOCK_CTRL_TXCLK_DISABLE |
2392                                             CLOCK_CTRL_44MHZ_CORE);
2393                         } else {
2394                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2395                         }
2396
2397                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2398                                     tp->pci_clock_ctrl | newbits3, 40);
2399                 }
2400         }
2401
2402         if (!(device_should_wake) &&
2403             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2404             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2405                 tg3_power_down_phy(tp, do_low_power);
2406
2407         tg3_frob_aux_power(tp);
2408
2409         /* Workaround for unstable PLL clock */
2410         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2411             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2412                 u32 val = tr32(0x7d00);
2413
2414                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2415                 tw32(0x7d00, val);
2416                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2417                         int err;
2418
2419                         err = tg3_nvram_lock(tp);
2420                         tg3_halt_cpu(tp, RX_CPU_BASE);
2421                         if (!err)
2422                                 tg3_nvram_unlock(tp);
2423                 }
2424         }
2425
2426         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2427
2428         if (device_should_wake)
2429                 pci_enable_wake(tp->pdev, state, true);
2430
2431         /* Finally, set the new power state. */
2432         pci_set_power_state(tp->pdev, state);
2433
2434         return 0;
2435 }
2436
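/* Translate the PHY AUX status speed/duplex field into SPEED_* and
 * DUPLEX_* values; the 5906 (10/100 only) is decoded from separate
 * status bits when the field does not match a known code.
 */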
2437 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2438 {
2439         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2440         case MII_TG3_AUX_STAT_10HALF:
2441                 *speed = SPEED_10;
2442                 *duplex = DUPLEX_HALF;
2443                 break;
2444
2445         case MII_TG3_AUX_STAT_10FULL:
2446                 *speed = SPEED_10;
2447                 *duplex = DUPLEX_FULL;
2448                 break;
2449
2450         case MII_TG3_AUX_STAT_100HALF:
2451                 *speed = SPEED_100;
2452                 *duplex = DUPLEX_HALF;
2453                 break;
2454
2455         case MII_TG3_AUX_STAT_100FULL:
2456                 *speed = SPEED_100;
2457                 *duplex = DUPLEX_FULL;
2458                 break;
2459
2460         case MII_TG3_AUX_STAT_1000HALF:
2461                 *speed = SPEED_1000;
2462                 *duplex = DUPLEX_HALF;
2463                 break;
2464
2465         case MII_TG3_AUX_STAT_1000FULL:
2466                 *speed = SPEED_1000;
2467                 *duplex = DUPLEX_FULL;
2468                 break;
2469
2470         default:
2471                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2472                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2473                                  SPEED_10;
2474                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2475                                   DUPLEX_HALF;
2476                         break;
2477                 }
2478                 *speed = SPEED_INVALID;
2479                 *duplex = DUPLEX_INVALID;
2480                 break;
2481         }
2482 }
2483
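/* Load the MII advertisement and control registers from link_config:
 * low-power mode limits advertising to 10Mb (plus 100Mb if WOL needs
 * it), autoneg advertises whatever is configured, and a forced mode
 * is programmed directly into BMCR after the link is taken down.
 */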
2484 static void tg3_phy_copper_begin(struct tg3 *tp)
2485 {
2486         u32 new_adv;
2487         int i;
2488
2489         if (tp->link_config.phy_is_low_power) {
2490                 /* Entering low power mode.  Disable gigabit and
2491                  * 100baseT advertisements.
2492                  */
2493                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2494
2495                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2496                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2497                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2498                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2499
2500                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2501         } else if (tp->link_config.speed == SPEED_INVALID) {
2502                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2503                         tp->link_config.advertising &=
2504                                 ~(ADVERTISED_1000baseT_Half |
2505                                   ADVERTISED_1000baseT_Full);
2506
2507                 new_adv = ADVERTISE_CSMA;
2508                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2509                         new_adv |= ADVERTISE_10HALF;
2510                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2511                         new_adv |= ADVERTISE_10FULL;
2512                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2513                         new_adv |= ADVERTISE_100HALF;
2514                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2515                         new_adv |= ADVERTISE_100FULL;
2516
2517                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2518
2519                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2520
2521                 if (tp->link_config.advertising &
2522                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2523                         new_adv = 0;
2524                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2525                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2526                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2527                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2528                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2529                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2530                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2531                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2532                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2533                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2534                 } else {
2535                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2536                 }
2537         } else {
2538                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2539                 new_adv |= ADVERTISE_CSMA;
2540
2541                 /* Asking for a specific link mode. */
2542                 if (tp->link_config.speed == SPEED_1000) {
2543                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2544
2545                         if (tp->link_config.duplex == DUPLEX_FULL)
2546                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2547                         else
2548                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2549                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2550                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2551                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2552                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2553                 } else {
2554                         if (tp->link_config.speed == SPEED_100) {
2555                                 if (tp->link_config.duplex == DUPLEX_FULL)
2556                                         new_adv |= ADVERTISE_100FULL;
2557                                 else
2558                                         new_adv |= ADVERTISE_100HALF;
2559                         } else {
2560                                 if (tp->link_config.duplex == DUPLEX_FULL)
2561                                         new_adv |= ADVERTISE_10FULL;
2562                                 else
2563                                         new_adv |= ADVERTISE_10HALF;
2564                         }
2565                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2566
2567                         new_adv = 0;
2568                 }
2569
2570                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2571         }
2572
2573         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2574             tp->link_config.speed != SPEED_INVALID) {
2575                 u32 bmcr, orig_bmcr;
2576
2577                 tp->link_config.active_speed = tp->link_config.speed;
2578                 tp->link_config.active_duplex = tp->link_config.duplex;
2579
2580                 bmcr = 0;
2581                 switch (tp->link_config.speed) {
2582                 default:
2583                 case SPEED_10:
2584                         break;
2585
2586                 case SPEED_100:
2587                         bmcr |= BMCR_SPEED100;
2588                         break;
2589
2590                 case SPEED_1000:
2591                         bmcr |= TG3_BMCR_SPEED1000;
2592                         break;
2593                 }
2594
2595                 if (tp->link_config.duplex == DUPLEX_FULL)
2596                         bmcr |= BMCR_FULLDPLX;
2597
2598                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2599                     (bmcr != orig_bmcr)) {
2600                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2601                         for (i = 0; i < 1500; i++) {
2602                                 u32 tmp;
2603
2604                                 udelay(10);
2605                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2606                                     tg3_readphy(tp, MII_BMSR, &tmp))
2607                                         continue;
2608                                 if (!(tmp & BMSR_LSTATUS)) {
2609                                         udelay(40);
2610                                         break;
2611                                 }
2612                         }
2613                         tg3_writephy(tp, MII_BMCR, bmcr);
2614                         udelay(40);
2615                 }
2616         } else {
2617                 tg3_writephy(tp, MII_BMCR,
2618                              BMCR_ANENABLE | BMCR_ANRESTART);
2619         }
2620 }
2621
2622 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2623 {
2624         int err;
2625
2626         /* Turn off tap power management. */
2627         /* Set Extended packet length bit */
2628         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2629
2630         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2631         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2632
2633         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2634         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2635
2636         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2637         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2638
2639         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2640         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2641
2642         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2643         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2644
2645         udelay(40);
2646
2647         return err;
2648 }
2649
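/* Return 1 only if the advertisement registers already contain every
 * mode requested in @mask (including the 1000BASE-T control register
 * unless the device is 10/100 only).
 */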
2650 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2651 {
2652         u32 adv_reg, all_mask = 0;
2653
2654         if (mask & ADVERTISED_10baseT_Half)
2655                 all_mask |= ADVERTISE_10HALF;
2656         if (mask & ADVERTISED_10baseT_Full)
2657                 all_mask |= ADVERTISE_10FULL;
2658         if (mask & ADVERTISED_100baseT_Half)
2659                 all_mask |= ADVERTISE_100HALF;
2660         if (mask & ADVERTISED_100baseT_Full)
2661                 all_mask |= ADVERTISE_100FULL;
2662
2663         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2664                 return 0;
2665
2666         if ((adv_reg & all_mask) != all_mask)
2667                 return 0;
2668         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2669                 u32 tg3_ctrl;
2670
2671                 all_mask = 0;
2672                 if (mask & ADVERTISED_1000baseT_Half)
2673                         all_mask |= ADVERTISE_1000HALF;
2674                 if (mask & ADVERTISED_1000baseT_Full)
2675                         all_mask |= ADVERTISE_1000FULL;
2676
2677                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2678                         return 0;
2679
2680                 if ((tg3_ctrl & all_mask) != all_mask)
2681                         return 0;
2682         }
2683         return 1;
2684 }
2685
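/* Check that the advertised pause bits match the requested flow
 * control.  On a full-duplex link a mismatch returns 0 (not OK); on
 * half duplex the advertisement is rewritten so that the next autoneg
 * cycle picks up the right setting, and 1 is returned.
 */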
2686 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2687 {
2688         u32 curadv, reqadv;
2689
2690         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2691                 return 1;
2692
2693         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2694         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2695
2696         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2697                 if (curadv != reqadv)
2698                         return 0;
2699
2700                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2701                         tg3_readphy(tp, MII_LPA, rmtadv);
2702         } else {
2703                 /* Reprogram the advertisement register, even if it
2704                  * does not affect the current link.  If the link
2705                  * gets renegotiated in the future, we can save an
2706                  * additional renegotiation cycle by advertising
2707                  * it correctly in the first place.
2708                  */
2709                 if (curadv != reqadv) {
2710                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2711                                      ADVERTISE_PAUSE_ASYM);
2712                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2713                 }
2714         }
2715
2716         return 1;
2717 }
2718
2719 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2720 {
2721         int current_link_up;
2722         u32 bmsr, dummy;
2723         u32 lcl_adv, rmt_adv;
2724         u16 current_speed;
2725         u8 current_duplex;
2726         int i, err;
2727
2728         tw32(MAC_EVENT, 0);
2729
2730         tw32_f(MAC_STATUS,
2731              (MAC_STATUS_SYNC_CHANGED |
2732               MAC_STATUS_CFG_CHANGED |
2733               MAC_STATUS_MI_COMPLETION |
2734               MAC_STATUS_LNKSTATE_CHANGED));
2735         udelay(40);
2736
2737         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2738                 tw32_f(MAC_MI_MODE,
2739                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2740                 udelay(80);
2741         }
2742
2743         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2744
2745         /* Some third-party PHYs need to be reset on link going
2746          * down.
2747          */
2748         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2749              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2750              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2751             netif_carrier_ok(tp->dev)) {
2752                 tg3_readphy(tp, MII_BMSR, &bmsr);
2753                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2754                     !(bmsr & BMSR_LSTATUS))
2755                         force_reset = 1;
2756         }
2757         if (force_reset)
2758                 tg3_phy_reset(tp);
2759
2760         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2761                 tg3_readphy(tp, MII_BMSR, &bmsr);
2762                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2763                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2764                         bmsr = 0;
2765
2766                 if (!(bmsr & BMSR_LSTATUS)) {
2767                         err = tg3_init_5401phy_dsp(tp);
2768                         if (err)
2769                                 return err;
2770
2771                         tg3_readphy(tp, MII_BMSR, &bmsr);
2772                         for (i = 0; i < 1000; i++) {
2773                                 udelay(10);
2774                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2775                                     (bmsr & BMSR_LSTATUS)) {
2776                                         udelay(40);
2777                                         break;
2778                                 }
2779                         }
2780
2781                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2782                             !(bmsr & BMSR_LSTATUS) &&
2783                             tp->link_config.active_speed == SPEED_1000) {
2784                                 err = tg3_phy_reset(tp);
2785                                 if (!err)
2786                                         err = tg3_init_5401phy_dsp(tp);
2787                                 if (err)
2788                                         return err;
2789                         }
2790                 }
2791         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2792                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2793                 /* 5701 {A0,B0} CRC bug workaround */
2794                 tg3_writephy(tp, 0x15, 0x0a75);
2795                 tg3_writephy(tp, 0x1c, 0x8c68);
2796                 tg3_writephy(tp, 0x1c, 0x8d68);
2797                 tg3_writephy(tp, 0x1c, 0x8c68);
2798         }
2799
2800         /* Clear pending interrupts... */
2801         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2802         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2803
2804         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2805                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2806         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2807                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2808
2809         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2810             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2811                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2812                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2813                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2814                 else
2815                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2816         }
2817
2818         current_link_up = 0;
2819         current_speed = SPEED_INVALID;
2820         current_duplex = DUPLEX_INVALID;
2821
2822         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2823                 u32 val;
2824
2825                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2826                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2827                 if (!(val & (1 << 10))) {
2828                         val |= (1 << 10);
2829                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2830                         goto relink;
2831                 }
2832         }
2833
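             /* Poll for up to ~4 ms for the PHY to report link up. */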
2834         bmsr = 0;
2835         for (i = 0; i < 100; i++) {
2836                 tg3_readphy(tp, MII_BMSR, &bmsr);
2837                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2838                     (bmsr & BMSR_LSTATUS))
2839                         break;
2840                 udelay(40);
2841         }
2842
2843         if (bmsr & BMSR_LSTATUS) {
2844                 u32 aux_stat, bmcr;
2845
2846                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
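                     /* Wait up to ~20 ms for a valid aux status before decoding speed/duplex. */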
2847                 for (i = 0; i < 2000; i++) {
2848                         udelay(10);
2849                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2850                             aux_stat)
2851                                 break;
2852                 }
2853
2854                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2855                                              &current_speed,
2856                                              &current_duplex);
2857
2858                 bmcr = 0;
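                     /* Re-read BMCR until it returns a stable, sane value. */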
2859                 for (i = 0; i < 200; i++) {
2860                         tg3_readphy(tp, MII_BMCR, &bmcr);
2861                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2862                                 continue;
2863                         if (bmcr && bmcr != 0x7fff)
2864                                 break;
2865                         udelay(10);
2866                 }
2867
2868                 lcl_adv = 0;
2869                 rmt_adv = 0;
2870
2871                 tp->link_config.active_speed = current_speed;
2872                 tp->link_config.active_duplex = current_duplex;
2873
2874                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2875                         if ((bmcr & BMCR_ANENABLE) &&
2876                             tg3_copper_is_advertising_all(tp,
2877                                                 tp->link_config.advertising)) {
2878                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2879                                                                   &rmt_adv))
2880                                         current_link_up = 1;
2881                         }
2882                 } else {
2883                         if (!(bmcr & BMCR_ANENABLE) &&
2884                             tp->link_config.speed == current_speed &&
2885                             tp->link_config.duplex == current_duplex &&
2886                             tp->link_config.flowctrl ==
2887                             tp->link_config.active_flowctrl) {
2888                                 current_link_up = 1;
2889                         }
2890                 }
2891
2892                 if (current_link_up == 1 &&
2893                     tp->link_config.active_duplex == DUPLEX_FULL)
2894                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2895         }
2896
2897 relink:
2898         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2899                 u32 tmp;
2900
2901                 tg3_phy_copper_begin(tp);
2902
2903                 tg3_readphy(tp, MII_BMSR, &tmp);
2904                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2905                     (tmp & BMSR_LSTATUS))
2906                         current_link_up = 1;
2907         }
2908
2909         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2910         if (current_link_up == 1) {
2911                 if (tp->link_config.active_speed == SPEED_100 ||
2912                     tp->link_config.active_speed == SPEED_10)
2913                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2914                 else
2915                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2916         } else
2917                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2918
2919         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2920         if (tp->link_config.active_duplex == DUPLEX_HALF)
2921                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2922
2923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2924                 if (current_link_up == 1 &&
2925                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2926                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2927                 else
2928                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2929         }
2930
2931         /* ??? Without this setting Netgear GA302T PHY does not
2932          * ??? send/receive packets...
2933          */
2934         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2935             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2936                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2937                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2938                 udelay(80);
2939         }
2940
2941         tw32_f(MAC_MODE, tp->mac_mode);
2942         udelay(40);
2943
2944         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2945                 /* Polled via timer. */
2946                 tw32_f(MAC_EVENT, 0);
2947         } else {
2948                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2949         }
2950         udelay(40);
2951
2952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2953             current_link_up == 1 &&
2954             tp->link_config.active_speed == SPEED_1000 &&
2955             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2956              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2957                 udelay(120);
2958                 tw32_f(MAC_STATUS,
2959                      (MAC_STATUS_SYNC_CHANGED |
2960                       MAC_STATUS_CFG_CHANGED));
2961                 udelay(40);
2962                 tg3_write_mem(tp,
2963                               NIC_SRAM_FIRMWARE_MBOX,
2964                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2965         }
2966
2967         /* Prevent send BD corruption. */
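             /* Keep CLKREQ off at 10/100 link speeds and on otherwise. */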
2968         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2969                 u16 oldlnkctl, newlnkctl;
2970
2971                 pci_read_config_word(tp->pdev,
2972                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2973                                      &oldlnkctl);
2974                 if (tp->link_config.active_speed == SPEED_100 ||
2975                     tp->link_config.active_speed == SPEED_10)
2976                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2977                 else
2978                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2979                 if (newlnkctl != oldlnkctl)
2980                         pci_write_config_word(tp->pdev,
2981                                               tp->pcie_cap + PCI_EXP_LNKCTL,
2982                                               newlnkctl);
2983         }
2984
2985         if (current_link_up != netif_carrier_ok(tp->dev)) {
2986                 if (current_link_up)
2987                         netif_carrier_on(tp->dev);
2988                 else
2989                         netif_carrier_off(tp->dev);
2990                 tg3_link_report(tp);
2991         }
2992
2993         return 0;
2994 }
2995
2996 struct tg3_fiber_aneginfo {
2997         int state;
2998 #define ANEG_STATE_UNKNOWN              0
2999 #define ANEG_STATE_AN_ENABLE            1
3000 #define ANEG_STATE_RESTART_INIT         2
3001 #define ANEG_STATE_RESTART              3
3002 #define ANEG_STATE_DISABLE_LINK_OK      4
3003 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3004 #define ANEG_STATE_ABILITY_DETECT       6
3005 #define ANEG_STATE_ACK_DETECT_INIT      7
3006 #define ANEG_STATE_ACK_DETECT           8
3007 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3008 #define ANEG_STATE_COMPLETE_ACK         10
3009 #define ANEG_STATE_IDLE_DETECT_INIT     11
3010 #define ANEG_STATE_IDLE_DETECT          12
3011 #define ANEG_STATE_LINK_OK              13
3012 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3013 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3014
3015         u32 flags;
3016 #define MR_AN_ENABLE            0x00000001
3017 #define MR_RESTART_AN           0x00000002
3018 #define MR_AN_COMPLETE          0x00000004
3019 #define MR_PAGE_RX              0x00000008
3020 #define MR_NP_LOADED            0x00000010
3021 #define MR_TOGGLE_TX            0x00000020
3022 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3023 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3024 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3025 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3026 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3027 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3028 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3029 #define MR_TOGGLE_RX            0x00002000
3030 #define MR_NP_RX                0x00004000
3031
3032 #define MR_LINK_OK              0x80000000
3033
3034         unsigned long link_time, cur_time;
3035
3036         u32 ability_match_cfg;
3037         int ability_match_count;
3038
3039         char ability_match, idle_match, ack_match;
3040
3041         u32 txconfig, rxconfig;
3042 #define ANEG_CFG_NP             0x00000080
3043 #define ANEG_CFG_ACK            0x00000040
3044 #define ANEG_CFG_RF2            0x00000020
3045 #define ANEG_CFG_RF1            0x00000010
3046 #define ANEG_CFG_PS2            0x00000001
3047 #define ANEG_CFG_PS1            0x00008000
3048 #define ANEG_CFG_HD             0x00004000
3049 #define ANEG_CFG_FD             0x00002000
3050 #define ANEG_CFG_INVAL          0x00001f06
3051
3052 };
3053 #define ANEG_OK         0
3054 #define ANEG_DONE       1
3055 #define ANEG_TIMER_ENAB 2
3056 #define ANEG_FAILED     -1
3057
3058 #define ANEG_STATE_SETTLE_TIME  10000
3059
3060 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3061                                    struct tg3_fiber_aneginfo *ap)
3062 {
3063         u16 flowctrl;
3064         unsigned long delta;
3065         u32 rx_cfg_reg;
3066         int ret;
3067
3068         if (ap->state == ANEG_STATE_UNKNOWN) {
3069                 ap->rxconfig = 0;
3070                 ap->link_time = 0;
3071                 ap->cur_time = 0;
3072                 ap->ability_match_cfg = 0;
3073                 ap->ability_match_count = 0;
3074                 ap->ability_match = 0;
3075                 ap->idle_match = 0;
3076                 ap->ack_match = 0;
3077         }
3078         ap->cur_time++;
3079
3080         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3081                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3082
3083                 if (rx_cfg_reg != ap->ability_match_cfg) {
3084                         ap->ability_match_cfg = rx_cfg_reg;
3085                         ap->ability_match = 0;
3086                         ap->ability_match_count = 0;
3087                 } else {
3088                         if (++ap->ability_match_count > 1) {
3089                                 ap->ability_match = 1;
3090                                 ap->ability_match_cfg = rx_cfg_reg;
3091                         }
3092                 }
3093                 if (rx_cfg_reg & ANEG_CFG_ACK)
3094                         ap->ack_match = 1;
3095                 else
3096                         ap->ack_match = 0;
3097
3098                 ap->idle_match = 0;
3099         } else {
3100                 ap->idle_match = 1;
3101                 ap->ability_match_cfg = 0;
3102                 ap->ability_match_count = 0;
3103                 ap->ability_match = 0;
3104                 ap->ack_match = 0;
3105
3106                 rx_cfg_reg = 0;
3107         }
3108
3109         ap->rxconfig = rx_cfg_reg;
3110         ret = ANEG_OK;
3111
3112         switch (ap->state) {
3113         case ANEG_STATE_UNKNOWN:
3114                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3115                         ap->state = ANEG_STATE_AN_ENABLE;
3116
3117                 /* fallthru */
3118         case ANEG_STATE_AN_ENABLE:
3119                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3120                 if (ap->flags & MR_AN_ENABLE) {
3121                         ap->link_time = 0;
3122                         ap->cur_time = 0;
3123                         ap->ability_match_cfg = 0;
3124                         ap->ability_match_count = 0;
3125                         ap->ability_match = 0;
3126                         ap->idle_match = 0;
3127                         ap->ack_match = 0;
3128
3129                         ap->state = ANEG_STATE_RESTART_INIT;
3130                 } else {
3131                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3132                 }
3133                 break;
3134
3135         case ANEG_STATE_RESTART_INIT:
3136                 ap->link_time = ap->cur_time;
3137                 ap->flags &= ~(MR_NP_LOADED);
3138                 ap->txconfig = 0;
3139                 tw32(MAC_TX_AUTO_NEG, 0);
3140                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3141                 tw32_f(MAC_MODE, tp->mac_mode);
3142                 udelay(40);
3143
3144                 ret = ANEG_TIMER_ENAB;
3145                 ap->state = ANEG_STATE_RESTART;
3146
3147                 /* fallthru */
3148         case ANEG_STATE_RESTART:
3149                 delta = ap->cur_time - ap->link_time;
3150                 if (delta > ANEG_STATE_SETTLE_TIME) {
3151                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3152                 } else {
3153                         ret = ANEG_TIMER_ENAB;
3154                 }
3155                 break;
3156
3157         case ANEG_STATE_DISABLE_LINK_OK:
3158                 ret = ANEG_DONE;
3159                 break;
3160
3161         case ANEG_STATE_ABILITY_DETECT_INIT:
3162                 ap->flags &= ~(MR_TOGGLE_TX);
3163                 ap->txconfig = ANEG_CFG_FD;
3164                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3165                 if (flowctrl & ADVERTISE_1000XPAUSE)
3166                         ap->txconfig |= ANEG_CFG_PS1;
3167                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3168                         ap->txconfig |= ANEG_CFG_PS2;
3169                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3170                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3171                 tw32_f(MAC_MODE, tp->mac_mode);
3172                 udelay(40);
3173
3174                 ap->state = ANEG_STATE_ABILITY_DETECT;
3175                 break;
3176
3177         case ANEG_STATE_ABILITY_DETECT:
3178                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3179                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3180                 }
3181                 break;
3182
3183         case ANEG_STATE_ACK_DETECT_INIT:
3184                 ap->txconfig |= ANEG_CFG_ACK;
3185                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3186                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3187                 tw32_f(MAC_MODE, tp->mac_mode);
3188                 udelay(40);
3189
3190                 ap->state = ANEG_STATE_ACK_DETECT;
3191
3192                 /* fallthru */
3193         case ANEG_STATE_ACK_DETECT:
3194                 if (ap->ack_match != 0) {
3195                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3196                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3197                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3198                         } else {
3199                                 ap->state = ANEG_STATE_AN_ENABLE;
3200                         }
3201                 } else if (ap->ability_match != 0 &&
3202                            ap->rxconfig == 0) {
3203                         ap->state = ANEG_STATE_AN_ENABLE;
3204                 }
3205                 break;
3206
3207         case ANEG_STATE_COMPLETE_ACK_INIT:
3208                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3209                         ret = ANEG_FAILED;
3210                         break;
3211                 }
3212                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3213                                MR_LP_ADV_HALF_DUPLEX |
3214                                MR_LP_ADV_SYM_PAUSE |
3215                                MR_LP_ADV_ASYM_PAUSE |
3216                                MR_LP_ADV_REMOTE_FAULT1 |
3217                                MR_LP_ADV_REMOTE_FAULT2 |
3218                                MR_LP_ADV_NEXT_PAGE |
3219                                MR_TOGGLE_RX |
3220                                MR_NP_RX);
3221                 if (ap->rxconfig & ANEG_CFG_FD)
3222                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3223                 if (ap->rxconfig & ANEG_CFG_HD)
3224                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3225                 if (ap->rxconfig & ANEG_CFG_PS1)
3226                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3227                 if (ap->rxconfig & ANEG_CFG_PS2)
3228                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3229                 if (ap->rxconfig & ANEG_CFG_RF1)
3230                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3231                 if (ap->rxconfig & ANEG_CFG_RF2)
3232                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3233                 if (ap->rxconfig & ANEG_CFG_NP)
3234                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3235
3236                 ap->link_time = ap->cur_time;
3237
3238                 ap->flags ^= (MR_TOGGLE_TX);
3239                 if (ap->rxconfig & 0x0008)
3240                         ap->flags |= MR_TOGGLE_RX;
3241                 if (ap->rxconfig & ANEG_CFG_NP)
3242                         ap->flags |= MR_NP_RX;
3243                 ap->flags |= MR_PAGE_RX;
3244
3245                 ap->state = ANEG_STATE_COMPLETE_ACK;
3246                 ret = ANEG_TIMER_ENAB;
3247                 break;
3248
3249         case ANEG_STATE_COMPLETE_ACK:
3250                 if (ap->ability_match != 0 &&
3251                     ap->rxconfig == 0) {
3252                         ap->state = ANEG_STATE_AN_ENABLE;
3253                         break;
3254                 }
3255                 delta = ap->cur_time - ap->link_time;
3256                 if (delta > ANEG_STATE_SETTLE_TIME) {
3257                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3258                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3259                         } else {
3260                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3261                                     !(ap->flags & MR_NP_RX)) {
3262                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3263                                 } else {
3264                                         ret = ANEG_FAILED;
3265                                 }
3266                         }
3267                 }
3268                 break;
3269
3270         case ANEG_STATE_IDLE_DETECT_INIT:
3271                 ap->link_time = ap->cur_time;
3272                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3273                 tw32_f(MAC_MODE, tp->mac_mode);
3274                 udelay(40);
3275
3276                 ap->state = ANEG_STATE_IDLE_DETECT;
3277                 ret = ANEG_TIMER_ENAB;
3278                 break;
3279
3280         case ANEG_STATE_IDLE_DETECT:
3281                 if (ap->ability_match != 0 &&
3282                     ap->rxconfig == 0) {
3283                         ap->state = ANEG_STATE_AN_ENABLE;
3284                         break;
3285                 }
3286                 delta = ap->cur_time - ap->link_time;
3287                 if (delta > ANEG_STATE_SETTLE_TIME) {
3288                         /* XXX another gem from the Broadcom driver :( */
3289                         ap->state = ANEG_STATE_LINK_OK;
3290                 }
3291                 break;
3292
3293         case ANEG_STATE_LINK_OK:
3294                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3295                 ret = ANEG_DONE;
3296                 break;
3297
3298         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3299                 /* ??? unimplemented */
3300                 break;
3301
3302         case ANEG_STATE_NEXT_PAGE_WAIT:
3303                 /* ??? unimplemented */
3304                 break;
3305
3306         default:
3307                 ret = ANEG_FAILED;
3308                 break;
3309         }
3310
3311         return ret;
3312 }
3313
3314 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3315 {
3316         int res = 0;
3317         struct tg3_fiber_aneginfo aninfo;
3318         int status = ANEG_FAILED;
3319         unsigned int tick;
3320         u32 tmp;
3321
3322         tw32_f(MAC_TX_AUTO_NEG, 0);
3323
3324         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3325         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3326         udelay(40);
3327
3328         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3329         udelay(40);
3330
3331         memset(&aninfo, 0, sizeof(aninfo));
3332         aninfo.flags |= MR_AN_ENABLE;
3333         aninfo.state = ANEG_STATE_UNKNOWN;
3334         aninfo.cur_time = 0;
3335         tick = 0;
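             /* Run the state machine about once per microsecond, for up to ~195 ms. */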
3336         while (++tick < 195000) {
3337                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3338                 if (status == ANEG_DONE || status == ANEG_FAILED)
3339                         break;
3340
3341                 udelay(1);
3342         }
3343
3344         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3345         tw32_f(MAC_MODE, tp->mac_mode);
3346         udelay(40);
3347
3348         *txflags = aninfo.txconfig;
3349         *rxflags = aninfo.flags;
3350
3351         if (status == ANEG_DONE &&
3352             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3353                              MR_LP_ADV_FULL_DUPLEX)))
3354                 res = 1;
3355
3356         return res;
3357 }
3358
3359 static void tg3_init_bcm8002(struct tg3 *tp)
3360 {
3361         u32 mac_status = tr32(MAC_STATUS);
3362         int i;
3363
3364         /* Reset when initializing for the first time or when we have a link. */
3365         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3366             !(mac_status & MAC_STATUS_PCS_SYNCED))
3367                 return;
3368
3369         /* Set PLL lock range. */
3370         tg3_writephy(tp, 0x16, 0x8007);
3371
3372         /* SW reset */
3373         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3374
3375         /* Wait for reset to complete. */
3376         /* XXX schedule_timeout() ... */
3377         for (i = 0; i < 500; i++)
3378                 udelay(10);
3379
3380         /* Config mode; select PMA/Ch 1 regs. */
3381         tg3_writephy(tp, 0x10, 0x8411);
3382
3383         /* Enable auto-lock and comdet, select txclk for tx. */
3384         tg3_writephy(tp, 0x11, 0x0a10);
3385
3386         tg3_writephy(tp, 0x18, 0x00a0);
3387         tg3_writephy(tp, 0x16, 0x41ff);
3388
3389         /* Assert and deassert POR. */
3390         tg3_writephy(tp, 0x13, 0x0400);
3391         udelay(40);
3392         tg3_writephy(tp, 0x13, 0x0000);
3393
3394         tg3_writephy(tp, 0x11, 0x0a50);
3395         udelay(40);
3396         tg3_writephy(tp, 0x11, 0x0a10);
3397
3398         /* Wait for signal to stabilize */
3399         /* XXX schedule_timeout() ... */
3400         for (i = 0; i < 15000; i++)
3401                 udelay(10);
3402
3403         /* Deselect the channel register so we can read the PHYID
3404          * later.
3405          */
3406         tg3_writephy(tp, 0x10, 0x8011);
3407 }
3408
3409 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3410 {
3411         u16 flowctrl;
3412         u32 sg_dig_ctrl, sg_dig_status;
3413         u32 serdes_cfg, expected_sg_dig_ctrl;
3414         int workaround, port_a;
3415         int current_link_up;
3416
3417         serdes_cfg = 0;
3418         expected_sg_dig_ctrl = 0;
3419         workaround = 0;
3420         port_a = 1;
3421         current_link_up = 0;
3422
3423         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3424             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3425                 workaround = 1;
3426                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3427                         port_a = 0;
3428
3429                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3430                 /* preserve bits 20-23 for voltage regulator */
3431                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3432         }
3433
3434         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3435
3436         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3437                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3438                         if (workaround) {
3439                                 u32 val = serdes_cfg;
3440
3441                                 if (port_a)
3442                                         val |= 0xc010000;
3443                                 else
3444                                         val |= 0x4010000;
3445                                 tw32_f(MAC_SERDES_CFG, val);
3446                         }
3447
3448                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3449                 }
3450                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3451                         tg3_setup_flow_control(tp, 0, 0);
3452                         current_link_up = 1;
3453                 }
3454                 goto out;
3455         }
3456
3457         /* Want auto-negotiation.  */
3458         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3459
3460         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3461         if (flowctrl & ADVERTISE_1000XPAUSE)
3462                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3463         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3464                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3465
3466         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3467                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3468                     tp->serdes_counter &&
3469                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3470                                     MAC_STATUS_RCVD_CFG)) ==
3471                      MAC_STATUS_PCS_SYNCED)) {
3472                         tp->serdes_counter--;
3473                         current_link_up = 1;
3474                         goto out;
3475                 }
3476 restart_autoneg:
3477                 if (workaround)
3478                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3479                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3480                 udelay(5);
3481                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3482
3483                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3484                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3485         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3486                                  MAC_STATUS_SIGNAL_DET)) {
3487                 sg_dig_status = tr32(SG_DIG_STATUS);
3488                 mac_status = tr32(MAC_STATUS);
3489
3490                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3491                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3492                         u32 local_adv = 0, remote_adv = 0;
3493
3494                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3495                                 local_adv |= ADVERTISE_1000XPAUSE;
3496                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3497                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3498
3499                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3500                                 remote_adv |= LPA_1000XPAUSE;
3501                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3502                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3503
3504                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3505                         current_link_up = 1;
3506                         tp->serdes_counter = 0;
3507                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3508                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3509                         if (tp->serdes_counter)
3510                                 tp->serdes_counter--;
3511                         else {
3512                                 if (workaround) {
3513                                         u32 val = serdes_cfg;
3514
3515                                         if (port_a)
3516                                                 val |= 0xc010000;
3517                                         else
3518                                                 val |= 0x4010000;
3519
3520                                         tw32_f(MAC_SERDES_CFG, val);
3521                                 }
3522
3523                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3524                                 udelay(40);
3525
3526                                 /* Parallel detection: the link is up only if
3527                                  * we have PCS_SYNC and are not receiving
3528                                  * config code words. */
3529                                 mac_status = tr32(MAC_STATUS);
3530                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3531                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3532                                         tg3_setup_flow_control(tp, 0, 0);
3533                                         current_link_up = 1;
3534                                         tp->tg3_flags2 |=
3535                                                 TG3_FLG2_PARALLEL_DETECT;
3536                                         tp->serdes_counter =
3537                                                 SERDES_PARALLEL_DET_TIMEOUT;
3538                                 } else
3539                                         goto restart_autoneg;
3540                         }
3541                 }
3542         } else {
3543                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3544                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3545         }
3546
3547 out:
3548         return current_link_up;
3549 }
3550
3551 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3552 {
3553         int current_link_up = 0;
3554
3555         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3556                 goto out;
3557
3558         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3559                 u32 txflags, rxflags;
3560                 int i;
3561
3562                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3563                         u32 local_adv = 0, remote_adv = 0;
3564
3565                         if (txflags & ANEG_CFG_PS1)
3566                                 local_adv |= ADVERTISE_1000XPAUSE;
3567                         if (txflags & ANEG_CFG_PS2)
3568                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3569
3570                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3571                                 remote_adv |= LPA_1000XPAUSE;
3572                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3573                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3574
3575                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3576
3577                         current_link_up = 1;
3578                 }
3579                 for (i = 0; i < 30; i++) {
3580                         udelay(20);
3581                         tw32_f(MAC_STATUS,
3582                                (MAC_STATUS_SYNC_CHANGED |
3583                                 MAC_STATUS_CFG_CHANGED));
3584                         udelay(40);
3585                         if ((tr32(MAC_STATUS) &
3586                              (MAC_STATUS_SYNC_CHANGED |
3587                               MAC_STATUS_CFG_CHANGED)) == 0)
3588                                 break;
3589                 }
3590
3591                 mac_status = tr32(MAC_STATUS);
3592                 if (current_link_up == 0 &&
3593                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3594                     !(mac_status & MAC_STATUS_RCVD_CFG))
3595                         current_link_up = 1;
3596         } else {
3597                 tg3_setup_flow_control(tp, 0, 0);
3598
3599                 /* Forcing 1000FD link up. */
3600                 current_link_up = 1;
3601
3602                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3603                 udelay(40);
3604
3605                 tw32_f(MAC_MODE, tp->mac_mode);
3606                 udelay(40);
3607         }
3608
3609 out:
3610         return current_link_up;
3611 }
3612
3613 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3614 {
3615         u32 orig_pause_cfg;
3616         u16 orig_active_speed;
3617         u8 orig_active_duplex;
3618         u32 mac_status;
3619         int current_link_up;
3620         int i;
3621
3622         orig_pause_cfg = tp->link_config.active_flowctrl;
3623         orig_active_speed = tp->link_config.active_speed;
3624         orig_active_duplex = tp->link_config.active_duplex;
3625
3626         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3627             netif_carrier_ok(tp->dev) &&
3628             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3629                 mac_status = tr32(MAC_STATUS);
3630                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3631                                MAC_STATUS_SIGNAL_DET |
3632                                MAC_STATUS_CFG_CHANGED |
3633                                MAC_STATUS_RCVD_CFG);
3634                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3635                                    MAC_STATUS_SIGNAL_DET)) {
3636                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3637                                             MAC_STATUS_CFG_CHANGED));
3638                         return 0;
3639                 }
3640         }
3641
3642         tw32_f(MAC_TX_AUTO_NEG, 0);
3643
3644         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3645         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3646         tw32_f(MAC_MODE, tp->mac_mode);
3647         udelay(40);
3648
3649         if (tp->phy_id == PHY_ID_BCM8002)
3650                 tg3_init_bcm8002(tp);
3651
3652         /* Enable link change event even when serdes polling.  */
3653         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3654         udelay(40);
3655
3656         current_link_up = 0;
3657         mac_status = tr32(MAC_STATUS);
3658
3659         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3660                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3661         else
3662                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3663
3664         tp->hw_status->status =
3665                 (SD_STATUS_UPDATED |
3666                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3667
3668         for (i = 0; i < 100; i++) {
3669                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3670                                     MAC_STATUS_CFG_CHANGED));
3671                 udelay(5);
3672                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3673                                          MAC_STATUS_CFG_CHANGED |
3674                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3675                         break;
3676         }
3677
3678         mac_status = tr32(MAC_STATUS);
3679         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3680                 current_link_up = 0;
3681                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3682                     tp->serdes_counter == 0) {
3683                         tw32_f(MAC_MODE, (tp->mac_mode |
3684                                           MAC_MODE_SEND_CONFIGS));
3685                         udelay(1);
3686                         tw32_f(MAC_MODE, tp->mac_mode);
3687                 }
3688         }
3689
3690         if (current_link_up == 1) {
3691                 tp->link_config.active_speed = SPEED_1000;
3692                 tp->link_config.active_duplex = DUPLEX_FULL;
3693                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3694                                     LED_CTRL_LNKLED_OVERRIDE |
3695                                     LED_CTRL_1000MBPS_ON));
3696         } else {
3697                 tp->link_config.active_speed = SPEED_INVALID;
3698                 tp->link_config.active_duplex = DUPLEX_INVALID;
3699                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3700                                     LED_CTRL_LNKLED_OVERRIDE |
3701                                     LED_CTRL_TRAFFIC_OVERRIDE));
3702         }
3703
3704         if (current_link_up != netif_carrier_ok(tp->dev)) {
3705                 if (current_link_up)
3706                         netif_carrier_on(tp->dev);
3707                 else
3708                         netif_carrier_off(tp->dev);
3709                 tg3_link_report(tp);
3710         } else {
3711                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3712                 if (orig_pause_cfg != now_pause_cfg ||
3713                     orig_active_speed != tp->link_config.active_speed ||
3714                     orig_active_duplex != tp->link_config.active_duplex)
3715                         tg3_link_report(tp);
3716         }
3717
3718         return 0;
3719 }
3720
3721 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3722 {
3723         int current_link_up, err = 0;
3724         u32 bmsr, bmcr;
3725         u16 current_speed;
3726         u8 current_duplex;
3727         u32 local_adv, remote_adv;
3728
3729         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3730         tw32_f(MAC_MODE, tp->mac_mode);
3731         udelay(40);
3732
3733         tw32(MAC_EVENT, 0);
3734
3735         tw32_f(MAC_STATUS,
3736              (MAC_STATUS_SYNC_CHANGED |
3737               MAC_STATUS_CFG_CHANGED |
3738               MAC_STATUS_MI_COMPLETION |
3739               MAC_STATUS_LNKSTATE_CHANGED));
3740         udelay(40);
3741
3742         if (force_reset)
3743                 tg3_phy_reset(tp);
3744
3745         current_link_up = 0;
3746         current_speed = SPEED_INVALID;
3747         current_duplex = DUPLEX_INVALID;
3748
3749         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3750         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3751         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3752                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3753                         bmsr |= BMSR_LSTATUS;
3754                 else
3755                         bmsr &= ~BMSR_LSTATUS;
3756         }
3757
3758         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3759
3760         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3761             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3762                 /* do nothing, just check for link up at the end */
3763         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3764                 u32 adv, new_adv;
3765
3766                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3767                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3768                                   ADVERTISE_1000XPAUSE |
3769                                   ADVERTISE_1000XPSE_ASYM |
3770                                   ADVERTISE_SLCT);
3771
3772                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3773
3774                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3775                         new_adv |= ADVERTISE_1000XHALF;
3776                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3777                         new_adv |= ADVERTISE_1000XFULL;
3778
3779                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3780                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3781                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3782                         tg3_writephy(tp, MII_BMCR, bmcr);
3783
3784                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3785                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3786                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3787
3788                         return err;
3789                 }
3790         } else {
3791                 u32 new_bmcr;
3792
3793                 bmcr &= ~BMCR_SPEED1000;
3794                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3795
3796                 if (tp->link_config.duplex == DUPLEX_FULL)
3797                         new_bmcr |= BMCR_FULLDPLX;
3798
3799                 if (new_bmcr != bmcr) {
3800                         /* BMCR_SPEED1000 is a reserved bit that needs
3801                          * to be set on write.
3802                          */
3803                         new_bmcr |= BMCR_SPEED1000;
3804
3805                         /* Force a linkdown */
3806                         if (netif_carrier_ok(tp->dev)) {
3807                                 u32 adv;
3808
3809                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3810                                 adv &= ~(ADVERTISE_1000XFULL |
3811                                          ADVERTISE_1000XHALF |
3812                                          ADVERTISE_SLCT);
3813                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3814                                 tg3_writephy(tp, MII_BMCR, bmcr |
3815                                                            BMCR_ANRESTART |
3816                                                            BMCR_ANENABLE);
3817                                 udelay(10);
3818                                 netif_carrier_off(tp->dev);
3819                         }
3820                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3821                         bmcr = new_bmcr;
3822                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3823                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3824                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3825                             ASIC_REV_5714) {
3826                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3827                                         bmsr |= BMSR_LSTATUS;
3828                                 else
3829                                         bmsr &= ~BMSR_LSTATUS;
3830                         }
3831                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3832                 }
3833         }
3834
3835         if (bmsr & BMSR_LSTATUS) {
3836                 current_speed = SPEED_1000;
3837                 current_link_up = 1;
3838                 if (bmcr & BMCR_FULLDPLX)
3839                         current_duplex = DUPLEX_FULL;
3840                 else
3841                         current_duplex = DUPLEX_HALF;
3842
3843                 local_adv = 0;
3844                 remote_adv = 0;
3845
3846                 if (bmcr & BMCR_ANENABLE) {
3847                         u32 common;
3848
3849                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3850                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3851                         common = local_adv & remote_adv;
3852                         if (common & (ADVERTISE_1000XHALF |
3853                                       ADVERTISE_1000XFULL)) {
3854                                 if (common & ADVERTISE_1000XFULL)
3855                                         current_duplex = DUPLEX_FULL;
3856                                 else
3857                                         current_duplex = DUPLEX_HALF;
3858                         } else {
3859                                 current_link_up = 0;
3860                         }
3861                 }
3862         }
3863
3864         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3865                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3866
3867         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3868         if (tp->link_config.active_duplex == DUPLEX_HALF)
3869                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3870
3871         tw32_f(MAC_MODE, tp->mac_mode);
3872         udelay(40);
3873
3874         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3875
3876         tp->link_config.active_speed = current_speed;
3877         tp->link_config.active_duplex = current_duplex;
3878
3879         if (current_link_up != netif_carrier_ok(tp->dev)) {
3880                 if (current_link_up)
3881                         netif_carrier_on(tp->dev);
3882                 else {
3883                         netif_carrier_off(tp->dev);
3884                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3885                 }
3886                 tg3_link_report(tp);
3887         }
3888         return err;
3889 }
3890
3891 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3892 {
3893         if (tp->serdes_counter) {
3894                 /* Give autoneg time to complete. */
3895                 tp->serdes_counter--;
3896                 return;
3897         }
3898         if (!netif_carrier_ok(tp->dev) &&
3899             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3900                 u32 bmcr;
3901
3902                 tg3_readphy(tp, MII_BMCR, &bmcr);
3903                 if (bmcr & BMCR_ANENABLE) {
3904                         u32 phy1, phy2;
3905
3906                         /* Select shadow register 0x1f */
3907                         tg3_writephy(tp, 0x1c, 0x7c00);
3908                         tg3_readphy(tp, 0x1c, &phy1);
3909
3910                         /* Select expansion interrupt status register */
3911                         tg3_writephy(tp, 0x17, 0x0f01);
3912                         tg3_readphy(tp, 0x15, &phy2);
3913                         tg3_readphy(tp, 0x15, &phy2);
3914
3915                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3916                                 /* We have signal detect and not receiving
3917                                  * config code words, link is up by parallel
3918                                  * detection.
3919                                  */
3920
3921                                 bmcr &= ~BMCR_ANENABLE;
3922                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3923                                 tg3_writephy(tp, MII_BMCR, bmcr);
3924                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3925                         }
3926                 }
3927         } else if (netif_carrier_ok(tp->dev) &&
3928                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3929                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3931                 u32 phy2;
3932
3933                 /* Select expansion interrupt status register */
3934                 tg3_writephy(tp, 0x17, 0x0f01);
3935                 tg3_readphy(tp, 0x15, &phy2);
3936                 if (phy2 & 0x20) {
3937                         u32 bmcr;
3938
3939                         /* Config code words received, turn on autoneg. */
3940                         tg3_readphy(tp, MII_BMCR, &bmcr);
3941                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3942
3943                 }
3945                 }
3946         }
3947 }
3948
3949 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3950 {
3951         int err;
3952
3953         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3954                 err = tg3_setup_fiber_phy(tp, force_reset);
3955         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3956                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3957         } else {
3958                 err = tg3_setup_copper_phy(tp, force_reset);
3959         }
3960
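             /* 5784 AX: scale the GRC_MISC_CFG prescaler to the current MAC clock. */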
3961         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3962                 u32 val, scale;
3963
3964                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3965                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3966                         scale = 65;
3967                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3968                         scale = 6;
3969                 else
3970                         scale = 12;
3971
3972                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3973                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3974                 tw32(GRC_MISC_CFG, val);
3975         }
3976
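             /* 1000 Mbps half-duplex needs a longer slot time. */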
3977         if (tp->link_config.active_speed == SPEED_1000 &&
3978             tp->link_config.active_duplex == DUPLEX_HALF)
3979                 tw32(MAC_TX_LENGTHS,
3980                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3981                       (6 << TX_LENGTHS_IPG_SHIFT) |
3982                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3983         else
3984                 tw32(MAC_TX_LENGTHS,
3985                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3986                       (6 << TX_LENGTHS_IPG_SHIFT) |
3987                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3988
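             /* Disable statistics block coalescing while the link is down. */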
3989         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3990                 if (netif_carrier_ok(tp->dev)) {
3991                         tw32(HOSTCC_STAT_COAL_TICKS,
3992                              tp->coal.stats_block_coalesce_usecs);
3993                 } else {
3994                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3995                 }
3996         }
3997
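             /* ASPM workaround: set the PCIe L1 power management threshold from the link state. */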
3998         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3999                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4000                 if (!netif_carrier_ok(tp->dev))
4001                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4002                               tp->pwrmgmt_thresh;
4003                 else
4004                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4005                 tw32(PCIE_PWR_MGMT_THRESH, val);
4006         }
4007
4008         return err;
4009 }
4010
4011 /* This is called whenever we suspect that the system chipset is re-
4012  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4013  * is bogus tx completions. We try to recover by setting the
4014  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4015  * in the workqueue.
4016  */
4017 static void tg3_tx_recover(struct tg3 *tp)
4018 {
4019         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4020                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4021
4022         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4023                "mapped I/O cycles to the network device, attempting to "
4024                "recover. Please report the problem to the driver maintainer "
4025                "and include system chipset information.\n", tp->dev->name);
4026
4027         spin_lock(&tp->lock);
4028         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4029         spin_unlock(&tp->lock);
4030 }
4031
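     /* Number of free descriptors left in the tx ring. */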
4032 static inline u32 tg3_tx_avail(struct tg3 *tp)
4033 {
4034         smp_mb();
4035         return (tp->tx_pending -
4036                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4037 }
4038
4039 /* Tigon3 never reports partial packet sends.  So we do not
4040  * need special logic to handle SKBs that have not had all
4041  * of their frags sent yet, like SunGEM does.
4042  */
4043 static void tg3_tx(struct tg3 *tp)
4044 {
4045         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4046         u32 sw_idx = tp->tx_cons;
4047
4048         while (sw_idx != hw_idx) {
4049                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4050                 struct sk_buff *skb = ri->skb;
4051                 int i, tx_bug = 0;
4052
4053                 if (unlikely(skb == NULL)) {
4054                         tg3_tx_recover(tp);
4055                         return;
4056                 }
4057
4058                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4059
4060                 ri->skb = NULL;
4061
4062                 sw_idx = NEXT_TX(sw_idx);
4063
4064                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4065                         ri = &tp->tx_buffers[sw_idx];
4066                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4067                                 tx_bug = 1;
4068                         sw_idx = NEXT_TX(sw_idx);
4069                 }
4070
4071                 dev_kfree_skb(skb);
4072
4073                 if (unlikely(tx_bug)) {
4074                         tg3_tx_recover(tp);
4075                         return;
4076                 }
4077         }
4078
4079         tp->tx_cons = sw_idx;
4080
4081         /* Need to make the tx_cons update visible to tg3_start_xmit()
4082          * before checking for netif_queue_stopped().  Without the
4083          * memory barrier, there is a small possibility that tg3_start_xmit()
4084          * will miss it and cause the queue to be stopped forever.
4085          */
4086         smp_mb();
4087
4088         if (unlikely(netif_queue_stopped(tp->dev) &&
4089                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4090                 netif_tx_lock(tp->dev);
4091                 if (netif_queue_stopped(tp->dev) &&
4092                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4093                         netif_wake_queue(tp->dev);
4094                 netif_tx_unlock(tp->dev);
4095         }
4096 }
4097
4098 /* Returns size of skb allocated or < 0 on error.
4099  *
4100  * We only need to fill in the address because the other members
4101  * of the RX descriptor are invariant, see tg3_init_rings.
4102  *
4103  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4104  * posting buffers we only dirty the first cache line of the RX
4105  * descriptor (containing the address).  Whereas for the RX status
4106  * buffers the cpu only reads the last cacheline of the RX descriptor
4107  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4108  */
4109 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4110                             int src_idx, u32 dest_idx_unmasked)
4111 {
4112         struct tg3_rx_buffer_desc *desc;
4113         struct ring_info *map, *src_map;
4114         struct sk_buff *skb;
4115         dma_addr_t mapping;
4116         int skb_size, dest_idx;
4117
4118         src_map = NULL;
4119         switch (opaque_key) {
4120         case RXD_OPAQUE_RING_STD:
4121                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4122                 desc = &tp->rx_std[dest_idx];
4123                 map = &tp->rx_std_buffers[dest_idx];
4124                 if (src_idx >= 0)
4125                         src_map = &tp->rx_std_buffers[src_idx];
4126                 skb_size = tp->rx_pkt_buf_sz;
4127                 break;
4128
4129         case RXD_OPAQUE_RING_JUMBO:
4130                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4131                 desc = &tp->rx_jumbo[dest_idx];
4132                 map = &tp->rx_jumbo_buffers[dest_idx];
4133                 if (src_idx >= 0)
4134                         src_map = &tp->rx_jumbo_buffers[src_idx];
4135                 skb_size = RX_JUMBO_PKT_BUF_SZ;
4136                 break;
4137
4138         default:
4139                 return -EINVAL;
4140         }
4141
4142         /* Do not overwrite any of the map or rp information
4143          * until we are sure we can commit to a new buffer.
4144          *
4145          * Callers depend upon this behavior and assume that
4146          * we leave everything unchanged if we fail.
4147          */
4148         skb = netdev_alloc_skb(tp->dev, skb_size);
4149         if (skb == NULL)
4150                 return -ENOMEM;
4151
4152         skb_reserve(skb, tp->rx_offset);
4153
4154         mapping = pci_map_single(tp->pdev, skb->data,
4155                                  skb_size - tp->rx_offset,
4156                                  PCI_DMA_FROMDEVICE);
4157
4158         map->skb = skb;
4159         pci_unmap_addr_set(map, mapping, mapping);
4160
4161         if (src_map != NULL)
4162                 src_map->skb = NULL;
4163
4164         desc->addr_hi = ((u64)mapping >> 32);
4165         desc->addr_lo = ((u64)mapping & 0xffffffff);
4166
4167         return skb_size;
4168 }
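/* Note on the two callers of tg3_alloc_rx_skb(): tg3_init_rings() passes
 * src_idx == -1 when populating an empty ring, while tg3_rx() passes the
 * index of the descriptor it just consumed so that the old ring_info slot
 * is released (src_map->skb = NULL) only after the replacement buffer has
 * been allocated and mapped successfully.
 */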
4169
4170 /* We only need to move over the address because the other
4171  * members of the RX descriptor are invariant.  See notes above
4172  * tg3_alloc_rx_skb for full details.
4173  */
4174 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4175                            int src_idx, u32 dest_idx_unmasked)
4176 {
4177         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4178         struct ring_info *src_map, *dest_map;
4179         int dest_idx;
4180
4181         switch (opaque_key) {
4182         case RXD_OPAQUE_RING_STD:
4183                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4184                 dest_desc = &tp->rx_std[dest_idx];
4185                 dest_map = &tp->rx_std_buffers[dest_idx];
4186                 src_desc = &tp->rx_std[src_idx];
4187                 src_map = &tp->rx_std_buffers[src_idx];
4188                 break;
4189
4190         case RXD_OPAQUE_RING_JUMBO:
4191                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4192                 dest_desc = &tp->rx_jumbo[dest_idx];
4193                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4194                 src_desc = &tp->rx_jumbo[src_idx];
4195                 src_map = &tp->rx_jumbo_buffers[src_idx];
4196                 break;
4197
4198         default:
4199                 return;
4200         }
4201
4202         dest_map->skb = src_map->skb;
4203         pci_unmap_addr_set(dest_map, mapping,
4204                            pci_unmap_addr(src_map, mapping));
4205         dest_desc->addr_hi = src_desc->addr_hi;
4206         dest_desc->addr_lo = src_desc->addr_lo;
4207
4208         src_map->skb = NULL;
4209 }
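/* tg3_recycle_rx() is the "keep the old buffer" path of tg3_rx() below:
 * when a packet is small enough to be copied out, or when a replacement
 * skb cannot be allocated, the original skb, DMA mapping, and descriptor
 * address are simply moved to the next free slot of the producer ring so
 * the buffer can be posted to the chip again.
 */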
4210
4211 #if TG3_VLAN_TAG_USED
4212 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4213 {
4214         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4215 }
4216 #endif
4217
4218 /* The RX ring scheme is composed of multiple rings which post fresh
4219  * buffers to the chip, and one special ring the chip uses to report
4220  * status back to the host.
4221  *
4222  * The special ring reports the status of received packets to the
4223  * host.  The chip does not write into the original descriptor the
4224  * RX buffer was obtained from.  The chip simply takes the original
4225  * descriptor as provided by the host, updates the status and length
4226  * field, then writes this into the next status ring entry.
4227  *
4228  * Each ring the host uses to post buffers to the chip is described
4229  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4230  * it is first placed into the on-chip RAM.  When the packet's length
4231  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4232  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4233  * whose MAXLEN can accommodate the new packet's length is chosen.
4234  *
4235  * The "separate ring for rx status" scheme may sound queer, but it makes
4236  * sense from a cache coherency perspective.  If only the host writes
4237  * to the buffer post rings, and only the chip writes to the rx status
4238  * rings, then cache lines never move beyond shared-modified state.
4239  * If both the host and chip were to write into the same ring, cache line
4240  * eviction could occur since both entities want it in an exclusive state.
4241  */
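/* Concretely, each status ring entry carries back the "opaque" cookie that
 * the host placed in the buffer descriptor it posted.  tg3_rx() below
 * splits that cookie into the ring identity (RXD_OPAQUE_RING_MASK) and the
 * buffer index (RXD_OPAQUE_INDEX_MASK) to recover the matching sk_buff and
 * DMA mapping for the completed packet.
 */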
4242 static int tg3_rx(struct tg3 *tp, int budget)
4243 {
4244         u32 work_mask, rx_std_posted = 0;
4245         u32 sw_idx = tp->rx_rcb_ptr;
4246         u16 hw_idx;
4247         int received;
4248
4249         hw_idx = tp->hw_status->idx[0].rx_producer;
4250         /*
4251          * We need to order the read of hw_idx and the read of
4252          * the opaque cookie.
4253          */
4254         rmb();
4255         work_mask = 0;
4256         received = 0;
4257         while (sw_idx != hw_idx && budget > 0) {
4258                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4259                 unsigned int len;
4260                 struct sk_buff *skb;
4261                 dma_addr_t dma_addr;
4262                 u32 opaque_key, desc_idx, *post_ptr;
4263
4264                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4265                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4266                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4267                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4268                                                   mapping);
4269                         skb = tp->rx_std_buffers[desc_idx].skb;
4270                         post_ptr = &tp->rx_std_ptr;
4271                         rx_std_posted++;
4272                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4273                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4274                                                   mapping);
4275                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4276                         post_ptr = &tp->rx_jumbo_ptr;
4277                 }
4278                 else {
4279                         goto next_pkt_nopost;
4280                 }
4281
4282                 work_mask |= opaque_key;
4283
4284                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4285                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4286                 drop_it:
4287                         tg3_recycle_rx(tp, opaque_key,
4288                                        desc_idx, *post_ptr);
4289                 drop_it_no_recycle:
4290                         /* Other statistics are tracked by the card. */
4291                         tp->net_stats.rx_dropped++;
4292                         goto next_pkt;
4293                 }
4294
4295                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4296                       ETH_FCS_LEN;
4297
4298                 /* rx_offset will likely not equal NET_IP_ALIGN
4299                  * if this is a 5701 card running in PCI-X mode
4300                  * [see tg3_get_invariants()]
4301                  */
4302                 if (len > RX_COPY_THRESHOLD &&
4303                     tp->rx_offset == NET_IP_ALIGN) {
4305                         int skb_size;
4306
4307                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4308                                                     desc_idx, *post_ptr);
4309                         if (skb_size < 0)
4310                                 goto drop_it;
4311
4312                         pci_unmap_single(tp->pdev, dma_addr,
4313                                          skb_size - tp->rx_offset,
4314                                          PCI_DMA_FROMDEVICE);
4315
4316                         skb_put(skb, len);
4317                 } else {
4318                         struct sk_buff *copy_skb;
4319
4320                         tg3_recycle_rx(tp, opaque_key,
4321                                        desc_idx, *post_ptr);
4322
4323                         copy_skb = netdev_alloc_skb(tp->dev,
4324                                                     len + TG3_RAW_IP_ALIGN);
4325                         if (copy_skb == NULL)
4326                                 goto drop_it_no_recycle;
4327
4328                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4329                         skb_put(copy_skb, len);
4330                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4331                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4332                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4333
4334                         /* We'll reuse the original ring buffer. */
4335                         skb = copy_skb;
4336                 }
4337
4338                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4339                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4340                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4341                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4342                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4343                 else
4344                         skb->ip_summed = CHECKSUM_NONE;
4345
4346                 skb->protocol = eth_type_trans(skb, tp->dev);
4347 #if TG3_VLAN_TAG_USED
4348                 if (tp->vlgrp != NULL &&
4349                     desc->type_flags & RXD_FLAG_VLAN) {
4350                         tg3_vlan_rx(tp, skb,
4351                                     desc->err_vlan & RXD_VLAN_MASK);
4352                 } else
4353 #endif
4354                         netif_receive_skb(skb);
4355
4356                 received++;
4357                 budget--;
4358
4359 next_pkt:
4360                 (*post_ptr)++;
4361
4362                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4363                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4364
4365                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4366                                      TG3_64BIT_REG_LOW, idx);
4367                         work_mask &= ~RXD_OPAQUE_RING_STD;
4368                         rx_std_posted = 0;
4369                 }
4370 next_pkt_nopost:
4371                 sw_idx++;
4372                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4373
4374                 /* Refresh hw_idx to see if there is new work */
4375                 if (sw_idx == hw_idx) {
4376                         hw_idx = tp->hw_status->idx[0].rx_producer;
4377                         rmb();
4378                 }
4379         }
4380
4381         /* ACK the status ring. */
4382         tp->rx_rcb_ptr = sw_idx;
4383         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4384
4385         /* Refill RX ring(s). */
4386         if (work_mask & RXD_OPAQUE_RING_STD) {
4387                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4388                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4389                              sw_idx);
4390         }
4391         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4392                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4393                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4394                              sw_idx);
4395         }
4396         mmiowb();
4397
4398         return received;
4399 }
4400
4401 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4402 {
4403         struct tg3_hw_status *sblk = tp->hw_status;
4404
4405         /* handle link change and other phy events */
4406         if (!(tp->tg3_flags &
4407               (TG3_FLAG_USE_LINKCHG_REG |
4408                TG3_FLAG_POLL_SERDES))) {
4409                 if (sblk->status & SD_STATUS_LINK_CHG) {
4410                         sblk->status = SD_STATUS_UPDATED |
4411                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4412                         spin_lock(&tp->lock);
4413                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4414                                 tw32_f(MAC_STATUS,
4415                                      (MAC_STATUS_SYNC_CHANGED |
4416                                       MAC_STATUS_CFG_CHANGED |
4417                                       MAC_STATUS_MI_COMPLETION |
4418                                       MAC_STATUS_LNKSTATE_CHANGED));
4419                                 udelay(40);
4420                         } else
4421                                 tg3_setup_phy(tp, 0);
4422                         spin_unlock(&tp->lock);
4423                 }
4424         }
4425
4426         /* run TX completion thread */
4427         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4428                 tg3_tx(tp);
4429                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4430                         return work_done;
4431         }
4432
4433         /* run RX thread, within the bounds set by NAPI.
4434          * All RX "locking" is done by ensuring outside
4435          * code synchronizes with tg3->napi.poll()
4436          */
4437         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4438                 work_done += tg3_rx(tp, budget - work_done);
4439
4440         return work_done;
4441 }
4442
4443 static int tg3_poll(struct napi_struct *napi, int budget)
4444 {
4445         struct tg3 *tp = container_of(napi, struct tg3, napi);
4446         int work_done = 0;
4447         struct tg3_hw_status *sblk = tp->hw_status;
4448
4449         while (1) {
4450                 work_done = tg3_poll_work(tp, work_done, budget);
4451
4452                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4453                         goto tx_recovery;
4454
4455                 if (unlikely(work_done >= budget))
4456                         break;
4457
4458                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4459                         /* tp->last_tag is used in tg3_restart_ints() below
4460                          * to tell the hw how much work has been processed,
4461                          * so we must read it before checking for more work.
4462                          */
4463                         tp->last_tag = sblk->status_tag;
4464                         rmb();
4465                 } else
4466                         sblk->status &= ~SD_STATUS_UPDATED;
4467
4468                 if (likely(!tg3_has_work(tp))) {
4469                         netif_rx_complete(tp->dev, napi);
4470                         tg3_restart_ints(tp);
4471                         break;
4472                 }
4473         }
4474
4475         return work_done;
4476
4477 tx_recovery:
4478         /* work_done is guaranteed to be less than budget. */
4479         netif_rx_complete(tp->dev, napi);
4480         schedule_work(&tp->reset_task);
4481         return work_done;
4482 }
4483
4484 static void tg3_irq_quiesce(struct tg3 *tp)
4485 {
4486         BUG_ON(tp->irq_sync);
4487
4488         tp->irq_sync = 1;
4489         smp_mb();
4490
4491         synchronize_irq(tp->pdev->irq);
4492 }
4493
4494 static inline int tg3_irq_sync(struct tg3 *tp)
4495 {
4496         return tp->irq_sync;
4497 }
4498
4499 /* Fully shut down all tg3 driver activity elsewhere in the system.
4500  * If irq_sync is non-zero, the IRQ handler is quiesced as well.
4501  * Most of the time this is only necessary when shutting down
4502  * the device.
4503  */
4504 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4505 {
4506         spin_lock_bh(&tp->lock);
4507         if (irq_sync)
4508                 tg3_irq_quiesce(tp);
4509 }
4510
4511 static inline void tg3_full_unlock(struct tg3 *tp)
4512 {
4513         spin_unlock_bh(&tp->lock);
4514 }
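/* A condensed sketch of the locking pattern (tg3_change_mtu() below is the
 * full version): take the lock with irq_sync set so the interrupt handler
 * is quiesced first, reconfigure the chip, then release the lock.
 *
 *      tg3_full_lock(tp, 1);
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *      err = tg3_restart_hw(tp, 0);
 *      tg3_full_unlock(tp);
 */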
4515
4516 /* One-shot MSI handler - Chip automatically disables interrupt
4517  * after sending MSI so driver doesn't have to do it.
4518  */
4519 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4520 {
4521         struct net_device *dev = dev_id;
4522         struct tg3 *tp = netdev_priv(dev);
4523
4524         prefetch(tp->hw_status);
4525         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4526
4527         if (likely(!tg3_irq_sync(tp)))
4528                 netif_rx_schedule(dev, &tp->napi);
4529
4530         return IRQ_HANDLED;
4531 }
4532
4533 /* MSI ISR - No need to check for interrupt sharing and no need to
4534  * flush status block and interrupt mailbox. PCI ordering rules
4535  * guarantee that MSI will arrive after the status block.
4536  */
4537 static irqreturn_t tg3_msi(int irq, void *dev_id)
4538 {
4539         struct net_device *dev = dev_id;
4540         struct tg3 *tp = netdev_priv(dev);
4541
4542         prefetch(tp->hw_status);
4543         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4544         /*
4545          * Writing any value to intr-mbox-0 clears PCI INTA# and
4546          * chip-internal interrupt pending events.
4547          * Writing non-zero to intr-mbox-0 additionally tells the
4548          * NIC to stop sending us irqs, engaging "in-intr-handler"
4549          * event coalescing.
4550          */
4551         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4552         if (likely(!tg3_irq_sync(tp)))
4553                 netif_rx_schedule(dev, &tp->napi);
4554
4555         return IRQ_RETVAL(1);
4556 }
4557
4558 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4559 {
4560         struct net_device *dev = dev_id;
4561         struct tg3 *tp = netdev_priv(dev);
4562         struct tg3_hw_status *sblk = tp->hw_status;
4563         unsigned int handled = 1;
4564
4565         /* In INTx mode, the interrupt can arrive at the CPU before the
4566          * status block posted prior to the interrupt is visible in memory.
4567          * Reading the PCI State register will confirm whether the
4568          * interrupt is ours and will flush the status block.
4569          */
4570         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4571                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4572                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4573                         handled = 0;
4574                         goto out;
4575                 }
4576         }
4577
4578         /*
4579          * Writing any value to intr-mbox-0 clears PCI INTA# and
4580          * chip-internal interrupt pending events.
4581          * Writing non-zero to intr-mbox-0 additionally tells the
4582          * NIC to stop sending us irqs, engaging "in-intr-handler"
4583          * event coalescing.
4584          *
4585          * Flush the mailbox to de-assert the IRQ immediately to prevent
4586          * spurious interrupts.  The flush impacts performance but
4587          * excessive spurious interrupts can be worse in some cases.
4588          */
4589         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4590         if (tg3_irq_sync(tp))
4591                 goto out;
4592         sblk->status &= ~SD_STATUS_UPDATED;
4593         if (likely(tg3_has_work(tp))) {
4594                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4595                 netif_rx_schedule(dev, &tp->napi);
4596         } else {
4597                 /* No work, shared interrupt perhaps?  re-enable
4598                  * interrupts, and flush that PCI write
4599                  */
4600                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4601                                0x00000000);
4602         }
4603 out:
4604         return IRQ_RETVAL(handled);
4605 }
4606
4607 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4608 {
4609         struct net_device *dev = dev_id;
4610         struct tg3 *tp = netdev_priv(dev);
4611         struct tg3_hw_status *sblk = tp->hw_status;
4612         unsigned int handled = 1;
4613
4614         /* In INTx mode, the interrupt can arrive at the CPU before the
4615          * status block posted prior to the interrupt is visible in memory.
4616          * Reading the PCI State register will confirm whether the
4617          * interrupt is ours and will flush the status block.
4618          */
4619         if (unlikely(sblk->status_tag == tp->last_tag)) {
4620                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4621                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4622                         handled = 0;
4623                         goto out;
4624                 }
4625         }
4626
4627         /*
4628          * Writing any value to intr-mbox-0 clears PCI INTA# and
4629          * chip-internal interrupt pending events.
4630          * Writing non-zero to intr-mbox-0 additionally tells the
4631          * NIC to stop sending us irqs, engaging "in-intr-handler"
4632          * event coalescing.
4633          *
4634          * Flush the mailbox to de-assert the IRQ immediately to prevent
4635          * spurious interrupts.  The flush impacts performance but
4636          * excessive spurious interrupts can be worse in some cases.
4637          */
4638         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4639         if (tg3_irq_sync(tp))
4640                 goto out;
4641         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4642                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4643                 /* Update last_tag to mark that this status has been
4644                  * seen. Because interrupt may be shared, we may be
4645                  * racing with tg3_poll(), so only update last_tag
4646                  * if tg3_poll() is not scheduled.
4647                  */
4648                 tp->last_tag = sblk->status_tag;
4649                 __netif_rx_schedule(dev, &tp->napi);
4650         }
4651 out:
4652         return IRQ_RETVAL(handled);
4653 }
4654
4655 /* ISR for interrupt test */
4656 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4657 {
4658         struct net_device *dev = dev_id;
4659         struct tg3 *tp = netdev_priv(dev);
4660         struct tg3_hw_status *sblk = tp->hw_status;
4661
4662         if ((sblk->status & SD_STATUS_UPDATED) ||
4663             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4664                 tg3_disable_ints(tp);
4665                 return IRQ_RETVAL(1);
4666         }
4667         return IRQ_RETVAL(0);
4668 }
4669
4670 static int tg3_init_hw(struct tg3 *, int);
4671 static int tg3_halt(struct tg3 *, int, int);
4672
4673 /* Restart hardware after configuration changes, self-test, etc.
4674  * Invoked with tp->lock held.
4675  */
4676 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4677         __releases(tp->lock)
4678         __acquires(tp->lock)
4679 {
4680         int err;
4681
4682         err = tg3_init_hw(tp, reset_phy);
4683         if (err) {
4684                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4685                        "aborting.\n", tp->dev->name);
4686                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4687                 tg3_full_unlock(tp);
4688                 del_timer_sync(&tp->timer);
4689                 tp->irq_sync = 0;
4690                 napi_enable(&tp->napi);
4691                 dev_close(tp->dev);
4692                 tg3_full_lock(tp, 0);
4693         }
4694         return err;
4695 }
4696
4697 #ifdef CONFIG_NET_POLL_CONTROLLER
4698 static void tg3_poll_controller(struct net_device *dev)
4699 {
4700         struct tg3 *tp = netdev_priv(dev);
4701
4702         tg3_interrupt(tp->pdev->irq, dev);
4703 }
4704 #endif
4705
4706 static void tg3_reset_task(struct work_struct *work)
4707 {
4708         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4709         int err;
4710         unsigned int restart_timer;
4711
4712         tg3_full_lock(tp, 0);
4713
4714         if (!netif_running(tp->dev)) {
4715                 tg3_full_unlock(tp);
4716                 return;
4717         }
4718
4719         tg3_full_unlock(tp);
4720
4721         tg3_phy_stop(tp);
4722
4723         tg3_netif_stop(tp);
4724
4725         tg3_full_lock(tp, 1);
4726
4727         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4728         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4729
4730         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4731                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4732                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4733                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4734                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4735         }
4736
4737         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4738         err = tg3_init_hw(tp, 1);
4739         if (err)
4740                 goto out;
4741
4742         tg3_netif_start(tp);
4743
4744         if (restart_timer)
4745                 mod_timer(&tp->timer, jiffies + 1);
4746
4747 out:
4748         tg3_full_unlock(tp);
4749
4750         if (!err)
4751                 tg3_phy_start(tp);
4752 }
4753
4754 static void tg3_dump_short_state(struct tg3 *tp)
4755 {
4756         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4757                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4758         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4759                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4760 }
4761
4762 static void tg3_tx_timeout(struct net_device *dev)
4763 {
4764         struct tg3 *tp = netdev_priv(dev);
4765
4766         if (netif_msg_tx_err(tp)) {
4767                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4768                        dev->name);
4769                 tg3_dump_short_state(tp);
4770         }
4771
4772         schedule_work(&tp->reset_task);
4773 }
4774
4775 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
4776 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4777 {
4778         u32 base = (u32) mapping & 0xffffffff;
4779
4780         return ((base > 0xffffdcc0) &&
4781                 (base + len + 8 < base));
4782 }
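/* Example of the test above: a buffer mapped at 0xfffffe00 with len 0x400
 * gives base + len + 8 == 0x208 after 32-bit truncation, which is smaller
 * than base, so the buffer straddles a 4GB boundary and the workaround
 * path is taken.  The 0xffffdcc0 pre-check (4GB minus 9024, presumably
 * sized to cover the largest jumbo frame) lets buffers that start well
 * below a boundary skip the addition entirely.
 */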
4783
4784 /* Test for DMA addresses > 40-bit */
4785 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4786                                           int len)
4787 {
4788 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4789         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4790                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4791         return 0;
4792 #else
4793         return 0;
4794 #endif
4795 }
4796
4797 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4798
4799 /* Work around 4GB and 40-bit hardware DMA bugs. */
4800 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4801                                        u32 last_plus_one, u32 *start,
4802                                        u32 base_flags, u32 mss)
4803 {
4804         struct sk_buff *new_skb;
4805         dma_addr_t new_addr = 0;
4806         u32 entry = *start;
4807         int i, ret = 0;
4808
4809         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4810                 new_skb = skb_copy(skb, GFP_ATOMIC);
4811         else {
4812                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4813
4814                 new_skb = skb_copy_expand(skb,
4815                                           skb_headroom(skb) + more_headroom,
4816                                           skb_tailroom(skb), GFP_ATOMIC);
4817         }
4818
4819         if (!new_skb) {
4820                 ret = -1;
4821         } else {
4822                 /* New SKB is guaranteed to be linear. */
4823                 entry = *start;
4824                 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4825                 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4826
4827                 /* Make sure new skb does not cross any 4G boundaries.
4828                  * Drop the packet if it does.
4829                  */
4830                 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4831                         if (!ret)
4832                                 skb_dma_unmap(&tp->pdev->dev, new_skb,
4833                                               DMA_TO_DEVICE);
4834                         ret = -1;
4835                         dev_kfree_skb(new_skb);
4836                         new_skb = NULL;
4837                 } else {
4838                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4839                                     base_flags, 1 | (mss << 1));
4840                         *start = NEXT_TX(entry);
4841                 }
4842         }
4843
4844         /* Now clean up the sw ring entries. */
4845         i = 0;
4846         while (entry != last_plus_one) {
4847                 if (i == 0) {
4848                         tp->tx_buffers[entry].skb = new_skb;
4849                 } else {
4850                         tp->tx_buffers[entry].skb = NULL;
4851                 }
4852                 entry = NEXT_TX(entry);
4853                 i++;
4854         }
4855
4856         skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4857         dev_kfree_skb(skb);
4858
4859         return ret;
4860 }
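/* The workaround above linearizes the offending skb into a single freshly
 * mapped copy (on 5701 it is also copied with enough extra headroom to
 * realign the data to a 4-byte boundary), writes one descriptor for the
 * copy, records new_skb in the first sw ring entry, clears the remaining
 * entries the original multi-descriptor packet occupied, and finally
 * unmaps and frees the original skb.
 */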
4861
4862 static void tg3_set_txd(struct tg3 *tp, int entry,
4863                         dma_addr_t mapping, int len, u32 flags,
4864                         u32 mss_and_is_end)
4865 {
4866         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4867         int is_end = (mss_and_is_end & 0x1);
4868         u32 mss = (mss_and_is_end >> 1);
4869         u32 vlan_tag = 0;
4870
4871         if (is_end)
4872                 flags |= TXD_FLAG_END;
4873         if (flags & TXD_FLAG_VLAN) {
4874                 vlan_tag = flags >> 16;
4875                 flags &= 0xffff;
4876         }
4877         vlan_tag |= (mss << TXD_MSS_SHIFT);
4878
4879         txd->addr_hi = ((u64) mapping >> 32);
4880         txd->addr_lo = ((u64) mapping & 0xffffffff);
4881         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4882         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4883 }
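/* The mss_and_is_end argument packs the END flag into bit 0 and the MSS
 * into the remaining bits.  Callers build it as, for example,
 * (skb_shinfo(skb)->nr_frags == 0) | (mss << 1) for the head descriptor
 * and (i == last) | (mss << 1) for each trailing fragment.
 */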
4884
4885 /* hard_start_xmit for devices that don't have any bugs and
4886  * support TG3_FLG2_HW_TSO_2 only.
4887  */
4888 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4889 {
4890         struct tg3 *tp = netdev_priv(dev);
4891         u32 len, entry, base_flags, mss;
4892         struct skb_shared_info *sp;
4893         dma_addr_t mapping;
4894
4895         len = skb_headlen(skb);
4896
4897         /* We are running in BH disabled context with netif_tx_lock
4898          * and TX reclaim runs via tp->napi.poll inside of a software
4899          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4900          * no IRQ context deadlocks to worry about either.  Rejoice!
4901          */
4902         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4903                 if (!netif_queue_stopped(dev)) {
4904                         netif_stop_queue(dev);
4905
4906                         /* This is a hard error, log it. */
4907                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4908                                "queue awake!\n", dev->name);
4909                 }
4910                 return NETDEV_TX_BUSY;
4911         }
4912
4913         entry = tp->tx_prod;
4914         base_flags = 0;
4915         mss = 0;
4916         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4917                 int tcp_opt_len, ip_tcp_len;
4918
4919                 if (skb_header_cloned(skb) &&
4920                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4921                         dev_kfree_skb(skb);
4922                         goto out_unlock;
4923                 }
4924
4925                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4926                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4927                 else {
4928                         struct iphdr *iph = ip_hdr(skb);
4929
4930                         tcp_opt_len = tcp_optlen(skb);
4931                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4932
4933                         iph->check = 0;
4934                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4935                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4936                 }
4937
4938                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4939                                TXD_FLAG_CPU_POST_DMA);
4940
4941                 tcp_hdr(skb)->check = 0;
4942
4943         }
4944         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4945                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4946 #if TG3_VLAN_TAG_USED
4947         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4948                 base_flags |= (TXD_FLAG_VLAN |
4949                                (vlan_tx_tag_get(skb) << 16));
4950 #endif
4951
4952         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4953                 dev_kfree_skb(skb);
4954                 goto out_unlock;
4955         }
4956
4957         sp = skb_shinfo(skb);
4958
4959         mapping = sp->dma_maps[0];
4960
4961         tp->tx_buffers[entry].skb = skb;
4962
4963         tg3_set_txd(tp, entry, mapping, len, base_flags,
4964                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4965
4966         entry = NEXT_TX(entry);
4967
4968         /* Now loop through additional data fragments, and queue them. */
4969         if (skb_shinfo(skb)->nr_frags > 0) {
4970                 unsigned int i, last;
4971
4972                 last = skb_shinfo(skb)->nr_frags - 1;
4973                 for (i = 0; i <= last; i++) {
4974                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4975
4976                         len = frag->size;
4977                         mapping = sp->dma_maps[i + 1];
4978                         tp->tx_buffers[entry].skb = NULL;
4979
4980                         tg3_set_txd(tp, entry, mapping, len,
4981                                     base_flags, (i == last) | (mss << 1));
4982
4983                         entry = NEXT_TX(entry);
4984                 }
4985         }
4986
4987         /* Packets are ready, update Tx producer idx local and on card. */
4988         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4989
4990         tp->tx_prod = entry;
4991         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4992                 netif_stop_queue(dev);
4993                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4994                         netif_wake_queue(tp->dev);
4995         }
4996
4997 out_unlock:
4998         mmiowb();
4999
5000         dev->trans_start = jiffies;
5001
5002         return NETDEV_TX_OK;
5003 }
5004
5005 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5006
5007 /* Use GSO to work around a rare TSO bug that may be triggered when the
5008  * TSO header is greater than 80 bytes.
5009  */
5010 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5011 {
5012         struct sk_buff *segs, *nskb;
5013
5014         /* Estimate the number of fragments in the worst case */
5015         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5016                 netif_stop_queue(tp->dev);
5017                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5018                         return NETDEV_TX_BUSY;
5019
5020                 netif_wake_queue(tp->dev);
5021         }
5022
5023         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5024         if (IS_ERR(segs))
5025                 goto tg3_tso_bug_end;
5026
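        /* Each skb produced by skb_gso_segment() is an ordinary, fully
         * built packet (TSO was masked out of the feature flags above), so
         * re-submitting it through tg3_start_xmit_dma_bug() takes the
         * normal non-TSO path and the oversized header never reaches the
         * hardware.
         */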
5027         do {
5028                 nskb = segs;
5029                 segs = segs->next;
5030                 nskb->next = NULL;
5031                 tg3_start_xmit_dma_bug(nskb, tp->dev);
5032         } while (segs);
5033
5034 tg3_tso_bug_end:
5035         dev_kfree_skb(skb);
5036
5037         return NETDEV_TX_OK;
5038 }
5039
5040 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5041  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5042  */
5043 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5044 {
5045         struct tg3 *tp = netdev_priv(dev);
5046         u32 len, entry, base_flags, mss;
5047         struct skb_shared_info *sp;
5048         int would_hit_hwbug;
5049         dma_addr_t mapping;
5050
5051         len = skb_headlen(skb);
5052
5053         /* We are running in BH disabled context with netif_tx_lock
5054          * and TX reclaim runs via tp->napi.poll inside of a software
5055          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5056          * no IRQ context deadlocks to worry about either.  Rejoice!
5057          */
5058         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5059                 if (!netif_queue_stopped(dev)) {
5060                         netif_stop_queue(dev);
5061
5062                         /* This is a hard error, log it. */
5063                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5064                                "queue awake!\n", dev->name);
5065                 }
5066                 return NETDEV_TX_BUSY;
5067         }
5068
5069         entry = tp->tx_prod;
5070         base_flags = 0;
5071         if (skb->ip_summed == CHECKSUM_PARTIAL)
5072                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5073         mss = 0;
5074         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5075                 struct iphdr *iph;
5076                 int tcp_opt_len, ip_tcp_len, hdr_len;
5077
5078                 if (skb_header_cloned(skb) &&
5079                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5080                         dev_kfree_skb(skb);
5081                         goto out_unlock;
5082                 }
5083
5084                 tcp_opt_len = tcp_optlen(skb);
5085                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5086
5087                 hdr_len = ip_tcp_len + tcp_opt_len;
5088                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5089                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5090                         return (tg3_tso_bug(tp, skb));
5091
5092                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5093                                TXD_FLAG_CPU_POST_DMA);
5094
5095                 iph = ip_hdr(skb);
5096                 iph->check = 0;
5097                 iph->tot_len = htons(mss + hdr_len);
5098                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5099                         tcp_hdr(skb)->check = 0;
5100                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5101                 } else
5102                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5103                                                                  iph->daddr, 0,
5104                                                                  IPPROTO_TCP,
5105                                                                  0);
5106
5107                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5108                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5109                         if (tcp_opt_len || iph->ihl > 5) {
5110                                 int tsflags;
5111
5112                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5113                                 mss |= (tsflags << 11);
5114                         }
5115                 } else {
5116                         if (tcp_opt_len || iph->ihl > 5) {
5117                                 int tsflags;
5118
5119                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5120                                 base_flags |= tsflags << 12;
5121                         }
5122                 }
5123         }
5124 #if TG3_VLAN_TAG_USED
5125         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5126                 base_flags |= (TXD_FLAG_VLAN |
5127                                (vlan_tx_tag_get(skb) << 16));
5128 #endif
5129
5130         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5131                 dev_kfree_skb(skb);
5132                 goto out_unlock;
5133         }
5134
5135         sp = skb_shinfo(skb);
5136
5137         mapping = sp->dma_maps[0];
5138
5139         tp->tx_buffers[entry].skb = skb;
5140
5141         would_hit_hwbug = 0;
5142
5143         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5144                 would_hit_hwbug = 1;
5145         else if (tg3_4g_overflow_test(mapping, len))
5146                 would_hit_hwbug = 1;
5147
5148         tg3_set_txd(tp, entry, mapping, len, base_flags,
5149                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5150
5151         entry = NEXT_TX(entry);
5152
5153         /* Now loop through additional data fragments, and queue them. */
5154         if (skb_shinfo(skb)->nr_frags > 0) {
5155                 unsigned int i, last;
5156
5157                 last = skb_shinfo(skb)->nr_frags - 1;
5158                 for (i = 0; i <= last; i++) {
5159                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5160
5161                         len = frag->size;
5162                         mapping = sp->dma_maps[i + 1];
5163
5164                         tp->tx_buffers[entry].skb = NULL;
5165
5166                         if (tg3_4g_overflow_test(mapping, len))
5167                                 would_hit_hwbug = 1;
5168
5169                         if (tg3_40bit_overflow_test(tp, mapping, len))
5170                                 would_hit_hwbug = 1;
5171
5172                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5173                                 tg3_set_txd(tp, entry, mapping, len,
5174                                             base_flags, (i == last)|(mss << 1));
5175                         else
5176                                 tg3_set_txd(tp, entry, mapping, len,
5177                                             base_flags, (i == last));
5178
5179                         entry = NEXT_TX(entry);
5180                 }
5181         }
5182
5183         if (would_hit_hwbug) {
5184                 u32 last_plus_one = entry;
5185                 u32 start;
5186
5187                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5188                 start &= (TG3_TX_RING_SIZE - 1);
5189
5190                 /* If the workaround fails due to memory/mapping
5191                  * failure, silently drop this packet.
5192                  */
5193                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5194                                                 &start, base_flags, mss))
5195                         goto out_unlock;
5196
5197                 entry = start;
5198         }
5199
5200         /* Packets are ready, update Tx producer idx local and on card. */
5201         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5202
5203         tp->tx_prod = entry;
5204         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5205                 netif_stop_queue(dev);
5206                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5207                         netif_wake_queue(tp->dev);
5208         }
5209
5210 out_unlock:
5211         mmiowb();
5212
5213         dev->trans_start = jiffies;
5214
5215         return NETDEV_TX_OK;
5216 }
5217
5218 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5219                                int new_mtu)
5220 {
5221         dev->mtu = new_mtu;
5222
5223         if (new_mtu > ETH_DATA_LEN) {
5224                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5225                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5226                         ethtool_op_set_tso(dev, 0);
5227                 }
5228                 else
5229                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5230         } else {
5231                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5232                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5233                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5234         }
5235 }
5236
5237 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5238 {
5239         struct tg3 *tp = netdev_priv(dev);
5240         int err;
5241
5242         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5243                 return -EINVAL;
5244
5245         if (!netif_running(dev)) {
5246                 /* We'll just catch it later when the
5247                  * device is brought up.
5248                  */
5249                 tg3_set_mtu(dev, tp, new_mtu);
5250                 return 0;
5251         }
5252
5253         tg3_phy_stop(tp);
5254
5255         tg3_netif_stop(tp);
5256
5257         tg3_full_lock(tp, 1);
5258
5259         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5260
5261         tg3_set_mtu(dev, tp, new_mtu);
5262
5263         err = tg3_restart_hw(tp, 0);
5264
5265         if (!err)
5266                 tg3_netif_start(tp);
5267
5268         tg3_full_unlock(tp);
5269
5270         if (!err)
5271                 tg3_phy_start(tp);
5272
5273         return err;
5274 }
5275
5276 /* Free up pending packets in all rx/tx rings.
5277  *
5278  * The chip has been shut down and the driver detached from
5279  * the networking stack, so no interrupts or new tx packets will
5280  * end up in the driver.  tp->{tx,}lock is not held and we are not
5281  * in an interrupt context and thus may sleep.
5282  */
5283 static void tg3_free_rings(struct tg3 *tp)
5284 {
5285         struct ring_info *rxp;
5286         int i;
5287
5288         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5289                 rxp = &tp->rx_std_buffers[i];
5290
5291                 if (rxp->skb == NULL)
5292                         continue;
5293                 pci_unmap_single(tp->pdev,
5294                                  pci_unmap_addr(rxp, mapping),
5295                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5296                                  PCI_DMA_FROMDEVICE);
5297                 dev_kfree_skb_any(rxp->skb);
5298                 rxp->skb = NULL;
5299         }
5300
5301         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5302                 rxp = &tp->rx_jumbo_buffers[i];
5303
5304                 if (rxp->skb == NULL)
5305                         continue;
5306                 pci_unmap_single(tp->pdev,
5307                                  pci_unmap_addr(rxp, mapping),
5308                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5309                                  PCI_DMA_FROMDEVICE);
5310                 dev_kfree_skb_any(rxp->skb);
5311                 rxp->skb = NULL;
5312         }
5313
5314         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5315                 struct tx_ring_info *txp;
5316                 struct sk_buff *skb;
5317
5318                 txp = &tp->tx_buffers[i];
5319                 skb = txp->skb;
5320
5321                 if (skb == NULL) {
5322                         i++;
5323                         continue;
5324                 }
5325
5326                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5327
5328                 txp->skb = NULL;
5329
5330                 i += skb_shinfo(skb)->nr_frags + 1;
5331
5332                 dev_kfree_skb_any(skb);
5333         }
5334 }
5335
5336 /* Initialize tx/rx rings for packet processing.
5337  *
5338  * The chip has been shut down and the driver detached from
5339  * the networking stack, so no interrupts or new tx packets will
5340  * end up in the driver.  tp->{tx,}lock are held and thus
5341  * we may not sleep.
5342  */
5343 static int tg3_init_rings(struct tg3 *tp)
5344 {
5345         u32 i;
5346
5347         /* Free up all the SKBs. */
5348         tg3_free_rings(tp);
5349
5350         /* Zero out all descriptors. */
5351         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5352         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5353         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5354         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5355
5356         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5357         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5358             (tp->dev->mtu > ETH_DATA_LEN))
5359                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5360
5361         /* Initialize the invariant fields of the rings; these are set
5362          * only once.  This works because the card does not
5363          * write into the rx buffer posting rings.
5364          */
5365         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5366                 struct tg3_rx_buffer_desc *rxd;
5367
5368                 rxd = &tp->rx_std[i];
5369                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5370                         << RXD_LEN_SHIFT;
5371                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5372                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5373                                (i << RXD_OPAQUE_INDEX_SHIFT));
5374         }
5375
5376         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5377                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5378                         struct tg3_rx_buffer_desc *rxd;
5379
5380                         rxd = &tp->rx_jumbo[i];
5381                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5382                                 << RXD_LEN_SHIFT;
5383                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5384                                 RXD_FLAG_JUMBO;
5385                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5386                                (i << RXD_OPAQUE_INDEX_SHIFT));
5387                 }
5388         }
5389
5390         /* Now allocate fresh SKBs for each rx ring. */
5391         for (i = 0; i < tp->rx_pending; i++) {
5392                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5393                         printk(KERN_WARNING PFX
5394                                "%s: Using a smaller RX standard ring, "
5395                                "only %d out of %d buffers were allocated "
5396                                "successfully.\n",
5397                                tp->dev->name, i, tp->rx_pending);
5398                         if (i == 0)
5399                                 return -ENOMEM;
5400                         tp->rx_pending = i;
5401                         break;
5402                 }
5403         }
5404
5405         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5406                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5407                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5408                                              -1, i) < 0) {
5409                                 printk(KERN_WARNING PFX
5410                                        "%s: Using a smaller RX jumbo ring, "
5411                                        "only %d out of %d buffers were "
5412                                        "allocated successfully.\n",
5413                                        tp->dev->name, i, tp->rx_jumbo_pending);
5414                                 if (i == 0) {
5415                                         tg3_free_rings(tp);
5416                                         return -ENOMEM;
5417                                 }
5418                                 tp->rx_jumbo_pending = i;
5419                                 break;
5420                         }
5421                 }
5422         }
5423         return 0;
5424 }
5425
5426 /*
5427  * Must not be invoked with interrupt sources disabled and
5428  * the hardware shut down.
5429  */
5430 static void tg3_free_consistent(struct tg3 *tp)
5431 {
5432         kfree(tp->rx_std_buffers);
5433         tp->rx_std_buffers = NULL;
5434         if (tp->rx_std) {
5435                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5436                                     tp->rx_std, tp->rx_std_mapping);
5437                 tp->rx_std = NULL;
5438         }
5439         if (tp->rx_jumbo) {
5440                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5441                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5442                 tp->rx_jumbo = NULL;
5443         }
5444         if (tp->rx_rcb) {
5445                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5446                                     tp->rx_rcb, tp->rx_rcb_mapping);
5447                 tp->rx_rcb = NULL;
5448         }
5449         if (tp->tx_ring) {
5450                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5451                         tp->tx_ring, tp->tx_desc_mapping);
5452                 tp->tx_ring = NULL;
5453         }
5454         if (tp->hw_status) {
5455                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5456                                     tp->hw_status, tp->status_mapping);
5457                 tp->hw_status = NULL;
5458         }
5459         if (tp->hw_stats) {
5460                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5461                                     tp->hw_stats, tp->stats_mapping);
5462                 tp->hw_stats = NULL;
5463         }
5464 }
5465
5466 /*
5467  * Must not be invoked with interrupt sources disabled and
5468  * the hardware shut down.  Can sleep.
5469  */
5470 static int tg3_alloc_consistent(struct tg3 *tp)
5471 {
5472         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5473                                       (TG3_RX_RING_SIZE +
5474                                        TG3_RX_JUMBO_RING_SIZE)) +
5475                                      (sizeof(struct tx_ring_info) *
5476                                       TG3_TX_RING_SIZE),
5477                                      GFP_KERNEL);
5478         if (!tp->rx_std_buffers)
5479                 return -ENOMEM;
5480
5481         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5482         tp->tx_buffers = (struct tx_ring_info *)
5483                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5484
5485         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5486                                           &tp->rx_std_mapping);
5487         if (!tp->rx_std)
5488                 goto err_out;
5489
5490         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5491                                             &tp->rx_jumbo_mapping);
5492
5493         if (!tp->rx_jumbo)
5494                 goto err_out;
5495
5496         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5497                                           &tp->rx_rcb_mapping);
5498         if (!tp->rx_rcb)
5499                 goto err_out;
5500
5501         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5502                                            &tp->tx_desc_mapping);
5503         if (!tp->tx_ring)
5504                 goto err_out;
5505
5506         tp->hw_status = pci_alloc_consistent(tp->pdev,
5507                                              TG3_HW_STATUS_SIZE,
5508                                              &tp->status_mapping);
5509         if (!tp->hw_status)
5510                 goto err_out;
5511
5512         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5513                                             sizeof(struct tg3_hw_stats),
5514                                             &tp->stats_mapping);
5515         if (!tp->hw_stats)
5516                 goto err_out;
5517
5518         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5519         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5520
5521         return 0;
5522
5523 err_out:
5524         tg3_free_consistent(tp);
5525         return -ENOMEM;
5526 }
5527
5528 #define MAX_WAIT_CNT 1000
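/* Each stop/abort loop below polls every 100 usec, so a block gets at
 * most 100 msec to quiesce before we give up on it.
 */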
5529
5530 /* To stop a block, clear the enable bit and poll till it
5531  * clears.  tp->lock is held.
5532  */
5533 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5534 {
5535         unsigned int i;
5536         u32 val;
5537
5538         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5539                 switch (ofs) {
5540                 case RCVLSC_MODE:
5541                 case DMAC_MODE:
5542                 case MBFREE_MODE:
5543                 case BUFMGR_MODE:
5544                 case MEMARB_MODE:
5545                         /* We can't enable/disable these bits on the
5546                          * 5705/5750, so just say success.
5547                          */
5548                         return 0;
5549
5550                 default:
5551                         break;
5552                 }
5553         }
5554
5555         val = tr32(ofs);
5556         val &= ~enable_bit;
5557         tw32_f(ofs, val);
5558
5559         for (i = 0; i < MAX_WAIT_CNT; i++) {
5560                 udelay(100);
5561                 val = tr32(ofs);
5562                 if ((val & enable_bit) == 0)
5563                         break;
5564         }
5565
5566         if (i == MAX_WAIT_CNT && !silent) {
5567                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5568                        "ofs=%lx enable_bit=%x\n",
5569                        ofs, enable_bit);
5570                 return -ENODEV;
5571         }
5572
5573         return 0;
5574 }
5575
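/* Walk the receive and transmit engines, clearing each block's enable
 * bit and waiting for it to stop, then wipe the status block and the
 * statistics.  Returns non-zero if any block refuses to quiesce.
 */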
5576 /* tp->lock is held. */
5577 static int tg3_abort_hw(struct tg3 *tp, int silent)
5578 {
5579         int i, err;
5580
5581         tg3_disable_ints(tp);
5582
5583         tp->rx_mode &= ~RX_MODE_ENABLE;
5584         tw32_f(MAC_RX_MODE, tp->rx_mode);
5585         udelay(10);
5586
5587         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5588         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5589         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5590         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5591         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5592         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5593
5594         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5595         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5596         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5597         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5598         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5599         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5600         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5601
5602         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5603         tw32_f(MAC_MODE, tp->mac_mode);
5604         udelay(40);
5605
5606         tp->tx_mode &= ~TX_MODE_ENABLE;
5607         tw32_f(MAC_TX_MODE, tp->tx_mode);
5608
5609         for (i = 0; i < MAX_WAIT_CNT; i++) {
5610                 udelay(100);
5611                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5612                         break;
5613         }
5614         if (i >= MAX_WAIT_CNT) {
5615                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5616                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5617                        tp->dev->name, tr32(MAC_TX_MODE));
5618                 err |= -ENODEV;
5619         }
5620
5621         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5622         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5623         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5624
5625         tw32(FTQ_RESET, 0xffffffff);
5626         tw32(FTQ_RESET, 0x00000000);
5627
5628         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5629         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5630
5631         if (tp->hw_status)
5632                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5633         if (tp->hw_stats)
5634                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5635
5636         return err;
5637 }
5638
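/* Acquire the NVRAM software arbitration semaphore (SWARB request/grant
 * set 1) so the driver and the on-chip firmware do not access the flash
 * at the same time.  nvram_lock_cnt makes the lock recursive within the
 * driver; tg3_nvram_unlock() releases it only when the count drops to
 * zero.
 */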
5639 /* tp->lock is held. */
5640 static int tg3_nvram_lock(struct tg3 *tp)
5641 {
5642         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5643                 int i;
5644
5645                 if (tp->nvram_lock_cnt == 0) {
5646                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5647                         for (i = 0; i < 8000; i++) {
5648                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5649                                         break;
5650                                 udelay(20);
5651                         }
5652                         if (i == 8000) {
5653                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5654                                 return -ENODEV;
5655                         }
5656                 }
5657                 tp->nvram_lock_cnt++;
5658         }
5659         return 0;
5660 }
5661
5662 /* tp->lock is held. */
5663 static void tg3_nvram_unlock(struct tg3 *tp)
5664 {
5665         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5666                 if (tp->nvram_lock_cnt > 0)
5667                         tp->nvram_lock_cnt--;
5668                 if (tp->nvram_lock_cnt == 0)
5669                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5670         }
5671 }
5672
5673 /* tp->lock is held. */
5674 static void tg3_enable_nvram_access(struct tg3 *tp)
5675 {
5676         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5677             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5678                 u32 nvaccess = tr32(NVRAM_ACCESS);
5679
5680                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5681         }
5682 }
5683
5684 /* tp->lock is held. */
5685 static void tg3_disable_nvram_access(struct tg3 *tp)
5686 {
5687         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5688             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5689                 u32 nvaccess = tr32(NVRAM_ACCESS);
5690
5691                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5692         }
5693 }
5694
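/* Post an event to the APE management firmware through its shared event
 * status register.  The event is silently dropped if the APE has not
 * published a valid segment signature or does not report itself ready.
 */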
5695 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5696 {
5697         int i;
5698         u32 apedata;
5699
5700         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5701         if (apedata != APE_SEG_SIG_MAGIC)
5702                 return;
5703
5704         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5705         if (!(apedata & APE_FW_STATUS_READY))
5706                 return;
5707
5708         /* Wait for up to 1 millisecond for APE to service previous event. */
5709         for (i = 0; i < 10; i++) {
5710                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5711                         return;
5712
5713                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5714
5715                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5716                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5717                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5718
5719                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5720
5721                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5722                         break;
5723
5724                 udelay(100);
5725         }
5726
5727         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5728                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5729 }
5730
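/* Tell the APE firmware about driver state transitions (init, shutdown,
 * suspend) so it can tell whether the host OS and driver are present.
 */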
5731 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5732 {
5733         u32 event;
5734         u32 apedata;
5735
5736         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5737                 return;
5738
5739         switch (kind) {
5740                 case RESET_KIND_INIT:
5741                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5742                                         APE_HOST_SEG_SIG_MAGIC);
5743                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5744                                         APE_HOST_SEG_LEN_MAGIC);
5745                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5746                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5747                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5748                                         APE_HOST_DRIVER_ID_MAGIC);
5749                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5750                                         APE_HOST_BEHAV_NO_PHYLOCK);
5751
5752                         event = APE_EVENT_STATUS_STATE_START;
5753                         break;
5754                 case RESET_KIND_SHUTDOWN:
5755                         /* With the interface we are currently using,
5756                          * APE does not track driver state.  Wiping
5757                          * out the HOST SEGMENT SIGNATURE forces
5758                          * the APE to assume OS absent status.
5759                          */
5760                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5761
5762                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5763                         break;
5764                 case RESET_KIND_SUSPEND:
5765                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5766                         break;
5767                 default:
5768                         return;
5769         }
5770
5771         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5772
5773         tg3_ape_send_event(tp, event);
5774 }
5775
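/* The three helpers below write driver-state values into mailboxes in
 * NIC SRAM so that the ASF/management firmware knows whether the driver
 * is starting, unloading, or suspending around a chip reset.
 */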
5776 /* tp->lock is held. */
5777 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5778 {
5779         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5780                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5781
5782         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5783                 switch (kind) {
5784                 case RESET_KIND_INIT:
5785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5786                                       DRV_STATE_START);
5787                         break;
5788
5789                 case RESET_KIND_SHUTDOWN:
5790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5791                                       DRV_STATE_UNLOAD);
5792                         break;
5793
5794                 case RESET_KIND_SUSPEND:
5795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5796                                       DRV_STATE_SUSPEND);
5797                         break;
5798
5799                 default:
5800                         break;
5801                 }
5802         }
5803
5804         if (kind == RESET_KIND_INIT ||
5805             kind == RESET_KIND_SUSPEND)
5806                 tg3_ape_driver_state_change(tp, kind);
5807 }
5808
5809 /* tp->lock is held. */
5810 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5811 {
5812         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5813                 switch (kind) {
5814                 case RESET_KIND_INIT:
5815                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5816                                       DRV_STATE_START_DONE);
5817                         break;
5818
5819                 case RESET_KIND_SHUTDOWN:
5820                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5821                                       DRV_STATE_UNLOAD_DONE);
5822                         break;
5823
5824                 default:
5825                         break;
5826                 }
5827         }
5828
5829         if (kind == RESET_KIND_SHUTDOWN)
5830                 tg3_ape_driver_state_change(tp, kind);
5831 }
5832
5833 /* tp->lock is held. */
5834 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5835 {
5836         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5837                 switch (kind) {
5838                 case RESET_KIND_INIT:
5839                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5840                                       DRV_STATE_START);
5841                         break;
5842
5843                 case RESET_KIND_SHUTDOWN:
5844                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5845                                       DRV_STATE_UNLOAD);
5846                         break;
5847
5848                 case RESET_KIND_SUSPEND:
5849                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5850                                       DRV_STATE_SUSPEND);
5851                         break;
5852
5853                 default:
5854                         break;
5855                 }
5856         }
5857 }
5858
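/* Wait for the bootcode to finish initializing.  Most chips hand back
 * the one's complement of the magic value through the firmware mailbox;
 * the 5906 signals completion via VCPU_STATUS instead.  A chip with no
 * firmware fitted is reported once but is not treated as an error.
 */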
5859 static int tg3_poll_fw(struct tg3 *tp)
5860 {
5861         int i;
5862         u32 val;
5863
5864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5865                 /* Wait up to 20ms for init done. */
5866                 for (i = 0; i < 200; i++) {
5867                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5868                                 return 0;
5869                         udelay(100);
5870                 }
5871                 return -ENODEV;
5872         }
5873
5874         /* Wait for firmware initialization to complete. */
5875         for (i = 0; i < 100000; i++) {
5876                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5877                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5878                         break;
5879                 udelay(10);
5880         }
5881
5882         /* Chip might not be fitted with firmware.  Some Sun onboard
5883          * parts are configured like that.  So don't signal the timeout
5884          * of the above loop as an error, but do report the lack of
5885          * running firmware once.
5886          */
5887         if (i >= 100000 &&
5888             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5889                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5890
5891                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5892                        tp->dev->name);
5893         }
5894
5895         return 0;
5896 }
5897
5898 /* Save PCI command register before chip reset */
5899 static void tg3_save_pci_state(struct tg3 *tp)
5900 {
5901         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5902 }
5903
5904 /* Restore PCI state after chip reset */
5905 static void tg3_restore_pci_state(struct tg3 *tp)
5906 {
5907         u32 val;
5908
5909         /* Re-enable indirect register accesses. */
5910         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5911                                tp->misc_host_ctrl);
5912
5913         /* Set MAX PCI retry to zero. */
5914         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5915         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5916             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5917                 val |= PCISTATE_RETRY_SAME_DMA;
5918         /* Allow reads and writes to the APE register and memory space. */
5919         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5920                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5921                        PCISTATE_ALLOW_APE_SHMEM_WR;
5922         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5923
5924         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5925
5926         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5927                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5928                         pcie_set_readrq(tp->pdev, 4096);
5929                 else {
5930                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5931                                               tp->pci_cacheline_sz);
5932                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5933                                               tp->pci_lat_timer);
5934                 }
5935         }
5936
5937         /* Make sure PCI-X relaxed ordering bit is clear. */
5938         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
5939                 u16 pcix_cmd;
5940
5941                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5942                                      &pcix_cmd);
5943                 pcix_cmd &= ~PCI_X_CMD_ERO;
5944                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5945                                       pcix_cmd);
5946         }
5947
5948         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5949
5950                 /* Chip reset on 5780 will reset MSI enable bit,
5951                  * so need to restore it.
5952                  */
5953                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5954                         u16 ctrl;
5955
5956                         pci_read_config_word(tp->pdev,
5957                                              tp->msi_cap + PCI_MSI_FLAGS,
5958                                              &ctrl);
5959                         pci_write_config_word(tp->pdev,
5960                                               tp->msi_cap + PCI_MSI_FLAGS,
5961                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5962                         val = tr32(MSGINT_MODE);
5963                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5964                 }
5965         }
5966 }
5967
5968 static void tg3_stop_fw(struct tg3 *);
5969
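/* Perform a core clock reset through GRC_MISC_CFG and then restore the
 * PCI configuration, MAC mode, and firmware handshake state that the
 * reset wipes out.  The inline comments describe the chip-specific
 * workarounds applied along the way.
 */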
5970 /* tp->lock is held. */
5971 static int tg3_chip_reset(struct tg3 *tp)
5972 {
5973         u32 val;
5974         void (*write_op)(struct tg3 *, u32, u32);
5975         int err;
5976
5977         tg3_nvram_lock(tp);
5978
5979         tg3_mdio_stop(tp);
5980
5981         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5982
5983         /* No matching tg3_nvram_unlock() after this because
5984          * chip reset below will undo the nvram lock.
5985          */
5986         tp->nvram_lock_cnt = 0;
5987
5988         /* GRC_MISC_CFG core clock reset will clear the memory
5989          * enable bit in PCI register 4 and the MSI enable bit
5990          * on some chips, so we save relevant registers here.
5991          */
5992         tg3_save_pci_state(tp);
5993
5994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5995             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5996             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5998             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
6000                 tw32(GRC_FASTBOOT_PC, 0);
6001
6002         /*
6003          * We must avoid the readl() that normally takes place.
6004          * It locks machines, causes machine checks, and other
6005          * fun things.  So temporarily disable the 5701
6006          * hardware workaround while we do the reset.
6007          */
6008         write_op = tp->write32;
6009         if (write_op == tg3_write_flush_reg32)
6010                 tp->write32 = tg3_write32;
6011
6012         /* Prevent the irq handler from reading or writing PCI registers
6013          * during chip reset when the memory enable bit in the PCI command
6014          * register may be cleared.  The chip does not generate interrupts
6015          * at this time, but the irq handler may still be called due to irq
6016          * sharing or irqpoll.
6017          */
6018         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6019         if (tp->hw_status) {
6020                 tp->hw_status->status = 0;
6021                 tp->hw_status->status_tag = 0;
6022         }
6023         tp->last_tag = 0;
6024         smp_mb();
6025         synchronize_irq(tp->pdev->irq);
6026
6027         /* do the reset */
6028         val = GRC_MISC_CFG_CORECLK_RESET;
6029
6030         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6031                 if (tr32(0x7e2c) == 0x60) {
6032                         tw32(0x7e2c, 0x20);
6033                 }
6034                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6035                         tw32(GRC_MISC_CFG, (1 << 29));
6036                         val |= (1 << 29);
6037                 }
6038         }
6039
6040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6041                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6042                 tw32(GRC_VCPU_EXT_CTRL,
6043                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6044         }
6045
6046         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6047                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6048         tw32(GRC_MISC_CFG, val);
6049
6050         /* restore 5701 hardware bug workaround write method */
6051         tp->write32 = write_op;
6052
6053         /* Unfortunately, we have to delay before the PCI read back.
6054          * Some 575X chips will not even respond to a PCI cfg access
6055          * when the reset command is given to the chip.
6056          *
6057          * How do these hardware designers expect things to work
6058          * properly if the PCI write is posted for a long period
6059          * of time?  It is always necessary to have some method by
6060          * which a register read back can occur to push out the
6061          * write that performs the reset.
6062          *
6063          * For most tg3 variants the trick below was working.
6064          * Ho hum...
6065          */
6066         udelay(120);
6067
6068         /* Flush PCI posted writes.  The normal MMIO registers
6069          * are inaccessible at this time so this is the only
6070          * way to do this reliably (actually, this is no longer
6071          * the case, see above).  I tried to use indirect
6072          * register read/write but this upset some 5701 variants.
6073          */
6074         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6075
6076         udelay(120);
6077
6078         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6079                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6080                         int i;
6081                         u32 cfg_val;
6082
6083                         /* Wait for link training to complete.  */
6084                         for (i = 0; i < 5000; i++)
6085                                 udelay(100);
6086
6087                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6088                         pci_write_config_dword(tp->pdev, 0xc4,
6089                                                cfg_val | (1 << 15));
6090                 }
6091
6092                 /* Set PCIE max payload size to 128 bytes and
6093                  * clear the "no snoop" and "relaxed ordering" bits.
6094                  */
6095                 pci_write_config_word(tp->pdev,
6096                                       tp->pcie_cap + PCI_EXP_DEVCTL,
6097                                       0);
6098
6099                 pcie_set_readrq(tp->pdev, 4096);
6100
6101                 /* Clear error status */
6102                 pci_write_config_word(tp->pdev,
6103                                       tp->pcie_cap + PCI_EXP_DEVSTA,
6104                                       PCI_EXP_DEVSTA_CED |
6105                                       PCI_EXP_DEVSTA_NFED |
6106                                       PCI_EXP_DEVSTA_FED |
6107                                       PCI_EXP_DEVSTA_URD);
6108         }
6109
6110         tg3_restore_pci_state(tp);
6111
6112         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6113
6114         val = 0;
6115         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6116                 val = tr32(MEMARB_MODE);
6117         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6118
6119         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6120                 tg3_stop_fw(tp);
6121                 tw32(0x5000, 0x400);
6122         }
6123
6124         tw32(GRC_MODE, tp->grc_mode);
6125
6126         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6127                 val = tr32(0xc4);
6128
6129                 tw32(0xc4, val | (1 << 15));
6130         }
6131
6132         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6133             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6134                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6135                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6136                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6137                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6138         }
6139
6140         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6141                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6142                 tw32_f(MAC_MODE, tp->mac_mode);
6143         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6144                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6145                 tw32_f(MAC_MODE, tp->mac_mode);
6146         } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6147                 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6148                 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6149                         tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6150                 tw32_f(MAC_MODE, tp->mac_mode);
6151         } else
6152                 tw32_f(MAC_MODE, 0);
6153         udelay(40);
6154
6155         tg3_mdio_start(tp);
6156
6157         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6158
6159         err = tg3_poll_fw(tp);
6160         if (err)
6161                 return err;
6162
6163         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6164             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6165                 val = tr32(0x7c00);
6166
6167                 tw32(0x7c00, val | (1 << 25));
6168         }
6169
6170         /* Reprobe ASF enable state.  */
6171         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6172         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6173         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6174         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6175                 u32 nic_cfg;
6176
6177                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6178                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6179                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6180                         tp->last_event_jiffies = jiffies;
6181                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6182                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6183                 }
6184         }
6185
6186         return 0;
6187 }
6188
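/* Ask the ASF firmware to pause itself before a reset by posting
 * FWCMD_NICDRV_PAUSE_FW and waiting for the RX CPU to acknowledge it.
 * Skipped when the APE handles management traffic instead.
 */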
6189 /* tp->lock is held. */
6190 static void tg3_stop_fw(struct tg3 *tp)
6191 {
6192         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6193            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6194                 /* Wait for RX cpu to ACK the previous event. */
6195                 tg3_wait_for_event_ack(tp);
6196
6197                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6198
6199                 tg3_generate_fw_event(tp);
6200
6201                 /* Wait for RX cpu to ACK this event. */
6202                 tg3_wait_for_event_ack(tp);
6203         }
6204 }
6205
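/* Full shutdown path: pause the firmware, post the pre-reset signature,
 * quiesce the hardware, reset the chip, and finally write the legacy
 * and post-reset completion signatures.
 */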
6206 /* tp->lock is held. */
6207 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6208 {
6209         int err;
6210
6211         tg3_stop_fw(tp);
6212
6213         tg3_write_sig_pre_reset(tp, kind);
6214
6215         tg3_abort_hw(tp, silent);
6216         err = tg3_chip_reset(tp);
6217
6218         tg3_write_sig_legacy(tp, kind);
6219         tg3_write_sig_post_reset(tp, kind);
6220
6221         if (err)
6222                 return err;
6223
6224         return 0;
6225 }
6226
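/* Link map of the 5701 A0 workaround firmware image embedded below and
 * loaded by tg3_load_5701_a0_firmware_fix().
 */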
6227 #define TG3_FW_RELEASE_MAJOR    0x0
6228 #define TG3_FW_RELEASE_MINOR    0x0
6229 #define TG3_FW_RELEASE_FIX      0x0
6230 #define TG3_FW_START_ADDR       0x08000000
6231 #define TG3_FW_TEXT_ADDR        0x08000000
6232 #define TG3_FW_TEXT_LEN         0x9c0
6233 #define TG3_FW_RODATA_ADDR      0x080009c0
6234 #define TG3_FW_RODATA_LEN       0x60
6235 #define TG3_FW_DATA_ADDR        0x08000a40
6236 #define TG3_FW_DATA_LEN         0x20
6237 #define TG3_FW_SBSS_ADDR        0x08000a60
6238 #define TG3_FW_SBSS_LEN         0xc
6239 #define TG3_FW_BSS_ADDR         0x08000a70
6240 #define TG3_FW_BSS_LEN          0x10
6241
6242 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6243         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6244         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6245         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6246         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6247         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6248         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6249         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6250         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6251         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6252         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6253         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6254         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6255         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6256         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6257         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6258         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6259         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6260         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6261         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6262         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6263         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6264         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6265         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6266         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6267         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6268         0, 0, 0, 0, 0, 0,
6269         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6270         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6271         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6272         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6273         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6274         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6275         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6276         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6277         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6278         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6279         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6280         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6281         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6282         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6283         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6284         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6285         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6286         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6287         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6288         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6289         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6290         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6291         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6292         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6293         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6294         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6295         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6296         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6297         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6298         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6299         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6300         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6301         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6302         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6303         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6304         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6305         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6306         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6307         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6308         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6309         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6310         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6311         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6312         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6313         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6314         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6315         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6316         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6317         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6318         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6319         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6320         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6321         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6322         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6323         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6324         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6325         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6326         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6327         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6328         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6329         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6330         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6331         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6332         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6333         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6334 };
6335
6336 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6337         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6338         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6339         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6340         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6341         0x00000000
6342 };
6343
6344 #if 0 /* All zeros, don't eat up space with it. */
6345 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6346         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6347         0x00000000, 0x00000000, 0x00000000, 0x00000000
6348 };
6349 #endif
6350
6351 #define RX_CPU_SCRATCH_BASE     0x30000
6352 #define RX_CPU_SCRATCH_SIZE     0x04000
6353 #define TX_CPU_SCRATCH_BASE     0x34000
6354 #define TX_CPU_SCRATCH_SIZE     0x04000
6355
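/* Halt the RX or TX embedded CPU by asserting CPU_MODE_HALT and polling
 * until the bit sticks.  On the 5906 the internal CPU is halted through
 * GRC_VCPU_EXT_CTRL instead.
 */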
6356 /* tp->lock is held. */
6357 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6358 {
6359         int i;
6360
6361         BUG_ON(offset == TX_CPU_BASE &&
6362             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6363
6364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6365                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6366
6367                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6368                 return 0;
6369         }
6370         if (offset == RX_CPU_BASE) {
6371                 for (i = 0; i < 10000; i++) {
6372                         tw32(offset + CPU_STATE, 0xffffffff);
6373                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6374                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6375                                 break;
6376                 }
6377
6378                 tw32(offset + CPU_STATE, 0xffffffff);
6379                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6380                 udelay(10);
6381         } else {
6382                 for (i = 0; i < 10000; i++) {
6383                         tw32(offset + CPU_STATE, 0xffffffff);
6384                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6385                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6386                                 break;
6387                 }
6388         }
6389
6390         if (i >= 10000) {
6391                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
6392                        "and %s CPU\n",
6393                        tp->dev->name,
6394                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6395                 return -ENODEV;
6396         }
6397
6398         /* Clear firmware's nvram arbitration. */
6399         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6400                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6401         return 0;
6402 }
6403
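/* Describes a firmware image to be copied into CPU scratch memory: the
 * base address, length, and data pointer of each of the text, rodata,
 * and data segments.  A NULL data pointer means the segment is all
 * zeros.
 */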
6404 struct fw_info {
6405         unsigned int text_base;
6406         unsigned int text_len;
6407         const u32 *text_data;
6408         unsigned int rodata_base;
6409         unsigned int rodata_len;
6410         const u32 *rodata_data;
6411         unsigned int data_base;
6412         unsigned int data_len;
6413         const u32 *data_data;
6414 };
6415
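/* Copy a firmware image into a CPU's scratch memory one 32-bit word at
 * a time while that CPU is held in halt, zero-filling the rest of the
 * scratch area first.
 */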
6416 /* tp->lock is held. */
6417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6418                                  int cpu_scratch_size, struct fw_info *info)
6419 {
6420         int err, lock_err, i;
6421         void (*write_op)(struct tg3 *, u32, u32);
6422
6423         if (cpu_base == TX_CPU_BASE &&
6424             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6425                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6426                        "TX cpu firmware on %s which is 5705.\n",
6427                        tp->dev->name);
6428                 return -EINVAL;
6429         }
6430
6431         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6432                 write_op = tg3_write_mem;
6433         else
6434                 write_op = tg3_write_indirect_reg32;
6435
6436         /* It is possible that bootcode is still loading at this point.
6437          * Get the nvram lock before halting the cpu.
6438          */
6439         lock_err = tg3_nvram_lock(tp);
6440         err = tg3_halt_cpu(tp, cpu_base);
6441         if (!lock_err)
6442                 tg3_nvram_unlock(tp);
6443         if (err)
6444                 goto out;
6445
6446         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6447                 write_op(tp, cpu_scratch_base + i, 0);
6448         tw32(cpu_base + CPU_STATE, 0xffffffff);
6449         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6450         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6451                 write_op(tp, (cpu_scratch_base +
6452                               (info->text_base & 0xffff) +
6453                               (i * sizeof(u32))),
6454                          (info->text_data ?
6455                           info->text_data[i] : 0));
6456         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6457                 write_op(tp, (cpu_scratch_base +
6458                               (info->rodata_base & 0xffff) +
6459                               (i * sizeof(u32))),
6460                          (info->rodata_data ?
6461                           info->rodata_data[i] : 0));
6462         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6463                 write_op(tp, (cpu_scratch_base +
6464                               (info->data_base & 0xffff) +
6465                               (i * sizeof(u32))),
6466                          (info->data_data ?
6467                           info->data_data[i] : 0));
6468
6469         err = 0;
6470
6471 out:
6472         return err;
6473 }
6474
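/* Load the 5701 A0 workaround firmware into both CPU scratch areas,
 * then start only the RX CPU at the image's entry point, retrying the
 * program counter write a few times before giving up.
 */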
6475 /* tp->lock is held. */
6476 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6477 {
6478         struct fw_info info;
6479         int err, i;
6480
6481         info.text_base = TG3_FW_TEXT_ADDR;
6482         info.text_len = TG3_FW_TEXT_LEN;
6483         info.text_data = &tg3FwText[0];
6484         info.rodata_base = TG3_FW_RODATA_ADDR;
6485         info.rodata_len = TG3_FW_RODATA_LEN;
6486         info.rodata_data = &tg3FwRodata[0];
6487         info.data_base = TG3_FW_DATA_ADDR;
6488         info.data_len = TG3_FW_DATA_LEN;
6489         info.data_data = NULL;
6490
6491         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6492                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6493                                     &info);
6494         if (err)
6495                 return err;
6496
6497         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6498                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6499                                     &info);
6500         if (err)
6501                 return err;
6502
6503         /* Now startup only the RX cpu. */
6504         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6505         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6506
6507         for (i = 0; i < 5; i++) {
6508                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6509                         break;
6510                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6511                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6512                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6513                 udelay(1000);
6514         }
6515         if (i >= 5) {
6516                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6517                        "to set RX CPU PC, is %08x should be %08x\n",
6518                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6519                        TG3_FW_TEXT_ADDR);
6520                 return -ENODEV;
6521         }
6522         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6523         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6524
6525         return 0;
6526 }
6527
6528
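/* Link map of the TSO firmware image embedded below. */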
6529 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6530 #define TG3_TSO_FW_RELEASE_MINOR        0x6
6531 #define TG3_TSO_FW_RELEASE_FIX          0x0
6532 #define TG3_TSO_FW_START_ADDR           0x08000000
6533 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6534 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6535 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6536 #define TG3_TSO_FW_RODATA_LEN           0x60
6537 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6538 #define TG3_TSO_FW_DATA_LEN             0x30
6539 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6540 #define TG3_TSO_FW_SBSS_LEN             0x2c
6541 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6542 #define TG3_TSO_FW_BSS_LEN              0x894
6543
6544 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6545         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6546         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6547         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6548         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6549         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6550         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6551         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6552         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6553         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6554         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6555         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6556         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6557         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6558         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6559         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6560         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6561         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6562         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6563         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6564         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6565         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6566         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6567         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6568         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6569         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6570         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6571         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6572         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6573         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6574         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6575         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6576         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6577         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6578         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6579         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6580         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6581         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6582         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6583         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6584         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6585         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6586         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6587         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6588         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6589         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6590         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6591         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6592         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6593         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6594         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6595         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6596         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6597         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6598         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6599         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6600         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6601         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6602         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6603         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6604         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6605         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6606         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6607         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6608         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6609         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6610         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6611         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6612         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6613         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6614         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6615         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6616         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6617         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6618         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6619         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6620         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6621         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6622         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6623         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6624         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6625         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6626         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6627         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6628         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6629         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6630         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6631         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6632         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6633         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6634         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6635         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6636         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6637         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6638         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6639         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6640         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6641         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6642         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6643         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6644         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6645         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6646         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6647         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6648         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6649         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6650         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6651         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6652         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6653         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6654         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6655         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6656         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6657         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6658         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6659         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6660         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6661         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6662         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6663         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6664         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6665         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6666         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6667         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6668         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6669         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6670         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6671         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6672         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6673         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6674         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6675         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6676         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6677         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6678         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6679         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6680         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6681         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6682         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6683         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6684         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6685         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6686         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6687         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6688         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6689         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6690         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6691         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6692         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6693         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6694         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6695         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6696         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6697         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6698         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6699         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6700         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6701         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6702         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6703         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6704         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6705         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6706         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6707         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6708         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6709         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6710         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6711         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6712         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6713         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6714         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6715         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6716         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6717         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6718         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6719         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6720         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6721         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6722         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6723         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6724         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6725         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6726         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6727         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6728         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6729         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6730         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6731         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6732         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6733         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6734         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6735         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6736         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6737         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6738         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6739         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6740         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6741         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6742         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6743         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6744         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6745         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6746         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6747         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6748         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6749         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6750         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6751         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6752         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6753         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6754         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6755         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6756         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6757         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6758         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6759         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6760         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6761         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6762         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6763         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6764         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6765         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6766         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6767         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6768         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6769         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6770         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6771         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6772         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6773         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6774         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6775         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6776         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6777         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6778         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6779         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6780         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6781         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6782         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6783         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6784         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6785         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6786         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6787         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6788         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6789         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6790         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6791         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6792         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6793         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6794         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6795         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6796         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6797         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6798         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6799         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6800         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6801         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6802         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6803         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6804         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6805         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6806         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6807         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6808         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6809         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6810         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6811         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6812         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6813         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6814         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6815         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6816         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6817         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6818         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6819         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6820         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6821         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6822         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6823         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6824         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6825         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6826         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6827         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6828         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6829 };
6830
6831 static const u32 tg3TsoFwRodata[] = {
6832         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6833         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6834         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6835         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6836         0x00000000,
6837 };
6838
6839 static const u32 tg3TsoFwData[] = {
6840         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6841         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6842         0x00000000,
6843 };
6844
6845 /* 5705 needs a special version of the TSO firmware.  */
6846 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6847 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
6848 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6849 #define TG3_TSO5_FW_START_ADDR          0x00010000
6850 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6851 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6852 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6853 #define TG3_TSO5_FW_RODATA_LEN          0x50
6854 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6855 #define TG3_TSO5_FW_DATA_LEN            0x20
6856 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6857 #define TG3_TSO5_FW_SBSS_LEN            0x28
6858 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6859 #define TG3_TSO5_FW_BSS_LEN             0x88
6860
6861 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6862         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6863         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6864         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6865         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6866         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6867         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6868         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6869         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6870         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6871         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6872         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6873         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6874         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6875         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6876         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6877         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6878         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6879         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6880         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6881         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6882         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6883         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6884         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6885         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6886         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6887         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6888         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6889         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6890         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6891         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6892         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6893         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6894         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6895         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6896         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6897         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6898         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6899         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6900         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6901         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6902         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6903         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6904         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6905         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6906         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6907         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6908         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6909         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6910         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6911         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6912         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6913         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6914         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6915         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6916         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6917         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6918         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6919         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6920         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6921         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6922         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6923         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6924         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6925         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6926         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6927         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6928         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6929         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6930         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6931         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6932         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6933         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6934         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6935         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6936         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6937         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6938         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6939         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6940         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6941         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6942         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6943         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6944         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6945         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6946         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6947         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6948         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6949         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6950         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6951         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6952         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6953         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6954         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6955         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6956         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6957         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6958         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6959         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6960         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6961         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6962         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6963         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6964         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6965         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6966         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6967         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6968         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6969         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6970         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6971         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6972         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6973         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6974         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6975         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6976         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6977         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6978         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6979         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6980         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6981         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6982         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6983         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6984         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6985         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6986         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6987         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6988         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6989         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6990         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6991         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6992         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6993         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6994         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6995         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6996         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6997         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6998         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6999         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
7000         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
7001         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
7002         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
7003         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
7004         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
7005         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
7006         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
7007         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
7008         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
7009         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
7010         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
7011         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
7012         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
7013         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
7014         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
7015         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
7016         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
7017         0x00000000, 0x00000000, 0x00000000,
7018 };
7019
7020 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
7021         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
7022         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
7023         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
7024         0x00000000, 0x00000000, 0x00000000,
7025 };
7026
7027 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
7028         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
7029         0x00000000, 0x00000000, 0x00000000,
7030 };
7031
7032 /* tp->lock is held. */
7033 static int tg3_load_tso_firmware(struct tg3 *tp)
7034 {
7035         struct fw_info info;
7036         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7037         int err, i;
7038
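             /* Chips with TSO offload built into the hardware never need this
              * firmware download.
              */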
7039         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7040                 return 0;
7041
7042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7043                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
7044                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
7045                 info.text_data = &tg3Tso5FwText[0];
7046                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
7047                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
7048                 info.rodata_data = &tg3Tso5FwRodata[0];
7049                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
7050                 info.data_len = TG3_TSO5_FW_DATA_LEN;
7051                 info.data_data = &tg3Tso5FwData[0];
7052                 cpu_base = RX_CPU_BASE;
7053                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7054                 cpu_scratch_size = (info.text_len +
7055                                     info.rodata_len +
7056                                     info.data_len +
7057                                     TG3_TSO5_FW_SBSS_LEN +
7058                                     TG3_TSO5_FW_BSS_LEN);
7059         } else {
7060                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
7061                 info.text_len = TG3_TSO_FW_TEXT_LEN;
7062                 info.text_data = &tg3TsoFwText[0];
7063                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
7064                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
7065                 info.rodata_data = &tg3TsoFwRodata[0];
7066                 info.data_base = TG3_TSO_FW_DATA_ADDR;
7067                 info.data_len = TG3_TSO_FW_DATA_LEN;
7068                 info.data_data = &tg3TsoFwData[0];
7069                 cpu_base = TX_CPU_BASE;
7070                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7071                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7072         }
7073
7074         err = tg3_load_firmware_cpu(tp, cpu_base,
7075                                     cpu_scratch_base, cpu_scratch_size,
7076                                     &info);
7077         if (err)
7078                 return err;
7079
7080         /* Now startup the cpu. */
7081         tw32(cpu_base + CPU_STATE, 0xffffffff);
7082         tw32_f(cpu_base + CPU_PC,    info.text_base);
7083
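             /* Poll up to five times, about 1 ms apart (~5 ms total), for the
              * CPU to latch the firmware entry point before giving up.
              */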
7084         for (i = 0; i < 5; i++) {
7085                 if (tr32(cpu_base + CPU_PC) == info.text_base)
7086                         break;
7087                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7088                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7089                 tw32_f(cpu_base + CPU_PC,    info.text_base);
7090                 udelay(1000);
7091         }
7092         if (i >= 5) {
7093                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
7094                        "CPU PC for %s: is %08x, should be %08x\n",
7095                        tp->dev->name, tr32(cpu_base + CPU_PC),
7096                        info.text_base);
7097                 return -ENODEV;
7098         }
7099         tw32(cpu_base + CPU_STATE, 0xffffffff);
7100         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7101         return 0;
7102 }
7103
7104
7105 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7106 {
7107         struct tg3 *tp = netdev_priv(dev);
7108         struct sockaddr *addr = p;
7109         int err = 0, skip_mac_1 = 0;
7110
7111         if (!is_valid_ether_addr(addr->sa_data))
7112                 return -EINVAL;
7113
7114         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7115
7116         if (!netif_running(dev))
7117                 return 0;
7118
7119         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7120                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7121
7122                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7123                 addr0_low = tr32(MAC_ADDR_0_LOW);
7124                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7125                 addr1_low = tr32(MAC_ADDR_1_LOW);
7126
7127                 /* Skip MAC addr 1 if ASF is using it. */
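                     /* ASF is assumed to own MAC address 1 when that slot holds
                      * a non-zero value different from MAC address 0.
                      */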
7128                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7129                     !(addr1_high == 0 && addr1_low == 0))
7130                         skip_mac_1 = 1;
7131         }
7132         spin_lock_bh(&tp->lock);
7133         __tg3_set_mac_addr(tp, skip_mac_1);
7134         spin_unlock_bh(&tp->lock);
7135
7136         return err;
7137 }
7138
7139 /* tp->lock is held. */
7140 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7141                            dma_addr_t mapping, u32 maxlen_flags,
7142                            u32 nic_addr)
7143 {
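             /* A TG3_BDINFO block in NIC SRAM describes one ring: the 64-bit
              * host DMA address split into high/low words, a maxlen/flags
              * word, and, when the 5705_PLUS flag is clear, the ring's
              * NIC SRAM address.
              */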
7144         tg3_write_mem(tp,
7145                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7146                       ((u64) mapping >> 32));
7147         tg3_write_mem(tp,
7148                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7149                       ((u64) mapping & 0xffffffff));
7150         tg3_write_mem(tp,
7151                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7152                        maxlen_flags);
7153
7154         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7155                 tg3_write_mem(tp,
7156                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7157                               nic_addr);
7158 }
7159
7160 static void __tg3_set_rx_mode(struct net_device *);
7161 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7162 {
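             /* Mirror the ethtool_coalesce parameters into the host coalescing
              * engine.  The interrupt-time tick registers and the statistics
              * block ticker are skipped on 5705_PLUS chips.
              */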
7163         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7164         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7165         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7166         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7167         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7168                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7169                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7170         }
7171         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7172         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7173         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7174                 u32 val = ec->stats_block_coalesce_usecs;
7175
7176                 if (!netif_carrier_ok(tp->dev))
7177                         val = 0;
7178
7179                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7180         }
7181 }
7182
7183 /* tp->lock is held. */
7184 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7185 {
7186         u32 val, rdmac_mode;
7187         int i, err, limit;
7188
7189         tg3_disable_ints(tp);
7190
7191         tg3_stop_fw(tp);
7192
7193         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7194
7195         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7196                 tg3_abort_hw(tp, 1);
7198
7199         if (reset_phy &&
7200             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7201                 tg3_phy_reset(tp);
7202
7203         err = tg3_chip_reset(tp);
7204         if (err)
7205                 return err;
7206
7207         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7208
7209         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7210                 val = tr32(TG3_CPMU_CTRL);
7211                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7212                 tw32(TG3_CPMU_CTRL, val);
7213
7214                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7215                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7216                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7217                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7218
7219                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7220                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7221                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7222                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7223
7224                 val = tr32(TG3_CPMU_HST_ACC);
7225                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7226                 val |= CPMU_HST_ACC_MACCLK_6_25;
7227                 tw32(TG3_CPMU_HST_ACC, val);
7228         }
7229
7230         /* This works around an issue with Athlon chipsets on
7231          * B3 tigon3 silicon.  This bit has no effect on any
7232          * other revision.  But do not set this on PCI Express
7233          * chips and don't even touch the clocks if the CPMU is present.
7234          */
7235         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7236                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7237                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7238                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7239         }
7240
7241         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7242             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7243                 val = tr32(TG3PCI_PCISTATE);
7244                 val |= PCISTATE_RETRY_SAME_DMA;
7245                 tw32(TG3PCI_PCISTATE, val);
7246         }
7247
7248         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7249                 /* Allow reads and writes to the
7250                  * APE register and memory space.
7251                  */
7252                 val = tr32(TG3PCI_PCISTATE);
7253                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7254                        PCISTATE_ALLOW_APE_SHMEM_WR;
7255                 tw32(TG3PCI_PCISTATE, val);
7256         }
7257
7258         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7259                 /* Enable some hw fixes.  */
7260                 val = tr32(TG3PCI_MSI_DATA);
7261                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7262                 tw32(TG3PCI_MSI_DATA, val);
7263         }
7264
7265         /* Descriptor ring init may make accesses to the
7266          * NIC SRAM area to setup the TX descriptors, so we
7267          * can only do this after the hardware has been
7268          * successfully reset.
7269          */
7270         err = tg3_init_rings(tp);
7271         if (err)
7272                 return err;
7273
7274         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7275             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7276                 /* This value is determined during the probe time DMA
7277                  * engine test, tg3_test_dma.
7278                  */
7279                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7280         }
7281
7282         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7283                           GRC_MODE_4X_NIC_SEND_RINGS |
7284                           GRC_MODE_NO_TX_PHDR_CSUM |
7285                           GRC_MODE_NO_RX_PHDR_CSUM);
7286         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7287
7288         /* Pseudo-header checksum is done by hardware logic and not
7289          * the offload processors, so make the chip do the pseudo-
7290          * header checksums on receive.  For transmit it is more
7291          * convenient to do the pseudo-header checksum in software
7292          * as Linux does that on transmit for us in all cases.
7293          */
7294         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7295
7296         tw32(GRC_MODE,
7297              tp->grc_mode |
7298              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7299
7300         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
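             /* A prescaler value of 65 presumably divides the 66 MHz clock by
              * 66, i.e. one timer tick per microsecond.
              */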
7301         val = tr32(GRC_MISC_CFG);
7302         val &= ~0xff;
7303         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7304         tw32(GRC_MISC_CFG, val);
7305
7306         /* Initialize MBUF/DESC pool. */
7307         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7308                 /* Do nothing.  */
7309         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7310                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7311                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7312                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7313                 else
7314                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7315                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7316                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7317         } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7319                 int fw_len;
7320
7321                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7322                           TG3_TSO5_FW_RODATA_LEN +
7323                           TG3_TSO5_FW_DATA_LEN +
7324                           TG3_TSO5_FW_SBSS_LEN +
7325                           TG3_TSO5_FW_BSS_LEN);
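                     /* Round the firmware footprint up to a 128-byte boundary,
                      * e.g. 0xe90 + 0x50 + 0x20 + 0x28 + 0x88 = 0xfb0 rounds
                      * up to 0x1000.
                      */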
7326                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7327                 tw32(BUFMGR_MB_POOL_ADDR,
7328                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7329                 tw32(BUFMGR_MB_POOL_SIZE,
7330                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7331         }
7332
7333         if (tp->dev->mtu <= ETH_DATA_LEN) {
7334                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7335                      tp->bufmgr_config.mbuf_read_dma_low_water);
7336                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7337                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7338                 tw32(BUFMGR_MB_HIGH_WATER,
7339                      tp->bufmgr_config.mbuf_high_water);
7340         } else {
7341                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7342                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7343                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7344                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7345                 tw32(BUFMGR_MB_HIGH_WATER,
7346                      tp->bufmgr_config.mbuf_high_water_jumbo);
7347         }
7348         tw32(BUFMGR_DMA_LOW_WATER,
7349              tp->bufmgr_config.dma_low_water);
7350         tw32(BUFMGR_DMA_HIGH_WATER,
7351              tp->bufmgr_config.dma_high_water);
7352
7353         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
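             /* Wait up to 20 ms (2000 iterations x 10 usec) for the buffer
              * manager to report itself enabled.
              */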
7354         for (i = 0; i < 2000; i++) {
7355                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7356                         break;
7357                 udelay(10);
7358         }
7359         if (i >= 2000) {
7360                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7361                        tp->dev->name);
7362                 return -ENODEV;
7363         }
7364
7365         /* Setup replenish threshold. */
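             /* One eighth of the configured ring size, clamped to the chip's
              * limits (and to half the internal ring on 5906).
              */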
7366         val = tp->rx_pending / 8;
7367         if (val == 0)
7368                 val = 1;
7369         else if (val > tp->rx_std_max_post)
7370                 val = tp->rx_std_max_post;
7371         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7372                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7373                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7374
7375                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7376                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7377         }
7378
7379         tw32(RCVBDI_STD_THRESH, val);
7380
7381         /* Initialize TG3_BDINFO's at:
7382          *  RCVDBDI_STD_BD:     standard eth size rx ring
7383          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7384          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7385          *
7386          * like so:
7387          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7388          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7389          *                              ring attribute flags
7390          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7391          *
7392          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7393          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7394          *
7395          * The size of each ring is fixed in the firmware, but the location is
7396          * configurable.
7397          */
7398         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7399              ((u64) tp->rx_std_mapping >> 32));
7400         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7401              ((u64) tp->rx_std_mapping & 0xffffffff));
7402         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7403              NIC_SRAM_RX_BUFFER_DESC);
7404
7405         /* Don't even try to program the JUMBO/MINI buffer descriptor
7406          * configs on 5705.
7407          */
7408         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7409                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7410                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7411         } else {
7412                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7413                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7414
7415                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7416                      BDINFO_FLAGS_DISABLED);
7417
7418                 /* Setup replenish threshold. */
7419                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7420
7421                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7422                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7423                              ((u64) tp->rx_jumbo_mapping >> 32));
7424                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7425                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7426                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7427                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7428                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7429                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7430                 } else {
7431                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7432                              BDINFO_FLAGS_DISABLED);
7433                 }
7434
7435         }
7436
7437         /* There is only one send ring on 5705/5750, no need to explicitly
7438          * disable the others.
7439          */
7440         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7441                 /* Clear out send RCB ring in SRAM. */
7442                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7443                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7444                                       BDINFO_FLAGS_DISABLED);
7445         }
7446
7447         tp->tx_prod = 0;
7448         tp->tx_cons = 0;
7449         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7450         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7451
7452         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7453                        tp->tx_desc_mapping,
7454                        (TG3_TX_RING_SIZE <<
7455                         BDINFO_FLAGS_MAXLEN_SHIFT),
7456                        NIC_SRAM_TX_BUFFER_DESC);
7457
7458         /* There is only one receive return ring on 5705/5750, no need
7459          * to explicitly disable the others.
7460          */
7461         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7462                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7463                      i += TG3_BDINFO_SIZE) {
7464                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7465                                       BDINFO_FLAGS_DISABLED);
7466                 }
7467         }
7468
7469         tp->rx_rcb_ptr = 0;
7470         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7471
7472         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7473                        tp->rx_rcb_mapping,
7474                        (TG3_RX_RCB_RING_SIZE(tp) <<
7475                         BDINFO_FLAGS_MAXLEN_SHIFT),
7476                        0);
7477
7478         tp->rx_std_ptr = tp->rx_pending;
7479         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7480                      tp->rx_std_ptr);
7481
7482         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7483                                                 tp->rx_jumbo_pending : 0;
7484         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7485                      tp->rx_jumbo_ptr);
7486
7487         /* Initialize MAC address and backoff seed. */
7488         __tg3_set_mac_addr(tp, 0);
7489
7490         /* MTU + ethernet header + FCS + optional VLAN tag */
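             /* (The extra 8 covers the 4-byte FCS plus a 4-byte 802.1Q tag.) */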
7491         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7492
7493         /* The slot time is changed by tg3_setup_phy if we
7494          * run at gigabit with half duplex.
7495          */
7496         tw32(MAC_TX_LENGTHS,
7497              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7498              (6 << TX_LENGTHS_IPG_SHIFT) |
7499              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7500
7501         /* Receive rules. */
7502         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7503         tw32(RCVLPC_CONFIG, 0x0181);
7504
7505         /* Calculate RDMAC_MODE setting early, we need it to determine
7506          * the RCVLPC_STATE_ENABLE mask.
7507          * the RCVLPC_STATS_ENABLE mask.
7508         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7509                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7510                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7511                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7512                       RDMAC_MODE_LNGREAD_ENAB);
7513
7514         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7515             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7516                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7517                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7518                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7519
7520         /* If statement applies to 5705 and 5750 PCI devices only */
7521         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7522              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7523             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7524                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7525                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7526                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7527                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7528                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7529                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7530                 }
7531         }
7532
7533         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7534                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7535
7536         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7537                 rdmac_mode |= (1 << 27);
7538
7539         /* Receive/send statistics. */
7540         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7541                 val = tr32(RCVLPC_STATS_ENABLE);
7542                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7543                 tw32(RCVLPC_STATS_ENABLE, val);
7544         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7545                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7546                 val = tr32(RCVLPC_STATS_ENABLE);
7547                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7548                 tw32(RCVLPC_STATS_ENABLE, val);
7549         } else {
7550                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7551         }
7552         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7553         tw32(SNDDATAI_STATSENAB, 0xffffff);
7554         tw32(SNDDATAI_STATSCTRL,
7555              (SNDDATAI_SCTRL_ENABLE |
7556               SNDDATAI_SCTRL_FASTUPD));
7557
7558         /* Setup host coalescing engine. */
7559         tw32(HOSTCC_MODE, 0);
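             /* The engine was just disabled; allow up to 20 ms (2000 x 10 usec)
              * for the enable bit to clear before reprogramming it.
              */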
7560         for (i = 0; i < 2000; i++) {
7561                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7562                         break;
7563                 udelay(10);
7564         }
7565
7566         __tg3_set_coalesce(tp, &tp->coal);
7567
7568         /* set status block DMA address */
7569         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7570              ((u64) tp->status_mapping >> 32));
7571         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7572              ((u64) tp->status_mapping & 0xffffffff));
7573
7574         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7575                 /* Status/statistics block address.  See tg3_timer,
7576                  * the tg3_periodic_fetch_stats call there, and
7577                  * tg3_get_stats to see how this works for 5705/5750 chips.
7578                  */
7579                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7580                      ((u64) tp->stats_mapping >> 32));
7581                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7582                      ((u64) tp->stats_mapping & 0xffffffff));
7583                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7584                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7585         }
7586
7587         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7588
7589         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7590         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7591         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7592                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7593
7594         /* Clear statistics/status block in chip, and status block in ram. */
7595         for (i = NIC_SRAM_STATS_BLK;
7596              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7597              i += sizeof(u32)) {
7598                 tg3_write_mem(tp, i, 0);
7599                 udelay(40);
7600         }
7601         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7602
7603         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7604                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7605                 /* reset to prevent losing 1st rx packet intermittently */
7606                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7607                 udelay(10);
7608         }
7609
7610         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7611                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7612         else
7613                 tp->mac_mode = 0;
7614         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7615                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7616         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7617             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7618             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7619                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7620         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7621         udelay(40);
7622
7623         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7624          * If TG3_FLG2_IS_NIC is zero, we should read the
7625          * register to preserve the GPIO settings for LOMs. The GPIOs,
7626          * whether used as inputs or outputs, are set by boot code after
7627          * reset.
7628          */
7629         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7630                 u32 gpio_mask;
7631
7632                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7633                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7634                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7635
7636                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7637                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7638                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7639
7640                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7641                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7642
7643                 tp->grc_local_ctrl &= ~gpio_mask;
7644                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7645
7646                 /* GPIO1 must be driven high for eeprom write protect */
7647                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7648                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7649                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7650         }
7651         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7652         udelay(100);
7653
7654         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7655         tp->last_tag = 0;
7656
7657         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7658                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7659                 udelay(40);
7660         }
7661
7662         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7663                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7664                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7665                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7666                WDMAC_MODE_LNGREAD_ENAB);
7667
7668         /* If statement applies to 5705 and 5750 PCI devices only */
7669         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7670              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7672                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7673                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7674                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7675                         /* nothing */
7676                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7677                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7678                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7679                         val |= WDMAC_MODE_RX_ACCEL;
7680                 }
7681         }
7682
7683         /* Enable host coalescing bug fix */
7684         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7685             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7686             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7687             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7688             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7689                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7690
7691         tw32_f(WDMAC_MODE, val);
7692         udelay(40);
7693
7694         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7695                 u16 pcix_cmd;
7696
7697                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7698                                      &pcix_cmd);
7699                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7700                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7701                         pcix_cmd |= PCI_X_CMD_READ_2K;
7702                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7703                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7704                         pcix_cmd |= PCI_X_CMD_READ_2K;
7705                 }
7706                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7707                                       pcix_cmd);
7708         }
7709
7710         tw32_f(RDMAC_MODE, rdmac_mode);
7711         udelay(40);
7712
7713         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7714         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7715                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7716
7717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7718                 tw32(SNDDATAC_MODE,
7719                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7720         else
7721                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7722
7723         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7724         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7725         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7726         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7727         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7728                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7729         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7730         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7731
7732         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7733                 err = tg3_load_5701_a0_firmware_fix(tp);
7734                 if (err)
7735                         return err;
7736         }
7737
7738         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7739                 err = tg3_load_tso_firmware(tp);
7740                 if (err)
7741                         return err;
7742         }
7743
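        /* With the DMA engines configured and any required firmware
         * loaded, enable the MAC transmitter and receiver.
         */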
7744         tp->tx_mode = TX_MODE_ENABLE;
7745         tw32_f(MAC_TX_MODE, tp->tx_mode);
7746         udelay(100);
7747
7748         tp->rx_mode = RX_MODE_ENABLE;
7749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7750             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7751             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7752             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7753                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7754
7755         tw32_f(MAC_RX_MODE, tp->rx_mode);
7756         udelay(10);
7757
7758         tw32(MAC_LED_CTRL, tp->led_ctrl);
7759
7760         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7761         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7762                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7763                 udelay(10);
7764         }
7765         tw32_f(MAC_RX_MODE, tp->rx_mode);
7766         udelay(10);
7767
7768         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7769                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7770                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7771                         /* Set drive transmission level to 1.2V  */
7772                         /* only if the signal pre-emphasis bit is not set  */
7773                         val = tr32(MAC_SERDES_CFG);
7774                         val &= 0xfffff000;
7775                         val |= 0x880;
7776                         tw32(MAC_SERDES_CFG, val);
7777                 }
7778                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7779                         tw32(MAC_SERDES_CFG, 0x616000);
7780         }
7781
7782         /* Prevent chip from dropping frames when flow control
7783          * is enabled.
7784          */
7785         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7786
7787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7788             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7789                 /* Use hardware link auto-negotiation */
7790                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7791         }
7792
7793         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7794             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7795                 u32 tmp;
7796
7797                 tmp = tr32(SERDES_RX_CTRL);
7798                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7799                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7800                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7801                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7802         }
7803
7804         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7805                 if (tp->link_config.phy_is_low_power) {
7806                         tp->link_config.phy_is_low_power = 0;
7807                         tp->link_config.speed = tp->link_config.orig_speed;
7808                         tp->link_config.duplex = tp->link_config.orig_duplex;
7809                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7810                 }
7811
7812                 err = tg3_setup_phy(tp, 0);
7813                 if (err)
7814                         return err;
7815
7816                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7817                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7818                         u32 tmp;
7819
7820                         /* Clear CRC stats. */
7821                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7822                                 tg3_writephy(tp, MII_TG3_TEST1,
7823                                              tmp | MII_TG3_TEST1_CRC_EN);
7824                                 tg3_readphy(tp, 0x14, &tmp);
7825                         }
7826                 }
7827         }
7828
7829         __tg3_set_rx_mode(tp->dev);
7830
7831         /* Initialize receive rules. */
7832         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7833         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7834         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7835         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7836
7837         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7838             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7839                 limit = 8;
7840         else
7841                 limit = 16;
7842         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7843                 limit -= 4;
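        /* Clear the unused receive rules from rule (limit - 1) down to
         * rule 4.  The case labels fall through on purpose so that a
         * given limit wipes every higher-numbered rule; rules 0 and 1
         * were programmed above, and rules 2 and 3 are deliberately
         * left alone (see the commented-out writes below).
         */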
7844         switch (limit) {
7845         case 16:
7846                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7847         case 15:
7848                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7849         case 14:
7850                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7851         case 13:
7852                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7853         case 12:
7854                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7855         case 11:
7856                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7857         case 10:
7858                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7859         case 9:
7860                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7861         case 8:
7862                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7863         case 7:
7864                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7865         case 6:
7866                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7867         case 5:
7868                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7869         case 4:
7870                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7871         case 3:
7872                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7873         case 2:
7874         case 1:
7875
7876         default:
7877                 break;
7878         }
7879
7880         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7881                 /* Write our heartbeat update interval to APE. */
7882                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7883                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7884
7885         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7886
7887         return 0;
7888 }
7889
7890 /* Called at device open time to get the chip ready for
7891  * packet processing.  Invoked with tp->lock held.
7892  */
7893 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7894 {
7895         tg3_switch_clocks(tp);
7896
7897         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7898
7899         return tg3_reset_hw(tp, reset_phy);
7900 }
7901
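/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * software counter, carrying into the high word when the low word
 * overflows.
 */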
7902 #define TG3_STAT_ADD32(PSTAT, REG) \
7903 do {    u32 __val = tr32(REG); \
7904         (PSTAT)->low += __val; \
7905         if ((PSTAT)->low < __val) \
7906                 (PSTAT)->high += 1; \
7907 } while (0)
7908
7909 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7910 {
7911         struct tg3_hw_stats *sp = tp->hw_stats;
7912
7913         if (!netif_carrier_ok(tp->dev))
7914                 return;
7915
7916         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7917         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7918         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7919         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7920         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7921         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7922         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7923         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7924         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7925         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7926         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7927         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7928         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7929
7930         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7931         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7932         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7933         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7934         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7935         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7936         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7937         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7938         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7939         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7940         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7941         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7942         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7943         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7944
7945         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7946         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7947         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7948 }
7949
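/* Periodic driver timer, re-armed every tp->timer_offset jiffies.
 * It kicks the chip when non-tagged interrupt status looks stuck,
 * gathers MAC statistics, polls the link state, and sends the ASF
 * heartbeat described in the comment further down.
 */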
7950 static void tg3_timer(unsigned long __opaque)
7951 {
7952         struct tg3 *tp = (struct tg3 *) __opaque;
7953
7954         if (tp->irq_sync)
7955                 goto restart_timer;
7956
7957         spin_lock(&tp->lock);
7958
7959         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7960                 /* All of this garbage is because, when using non-tagged
7961                  * IRQ status, the mailbox/status_block protocol the chip
7962                  * uses with the CPU is race prone.
7963                  */
7964                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7965                         tw32(GRC_LOCAL_CTRL,
7966                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7967                 } else {
7968                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7969                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7970                 }
7971
7972                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7973                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7974                         spin_unlock(&tp->lock);
7975                         schedule_work(&tp->reset_task);
7976                         return;
7977                 }
7978         }
7979
7980         /* This part only runs once per second. */
7981         if (!--tp->timer_counter) {
7982                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7983                         tg3_periodic_fetch_stats(tp);
7984
7985                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7986                         u32 mac_stat;
7987                         int phy_event;
7988
7989                         mac_stat = tr32(MAC_STATUS);
7990
7991                         phy_event = 0;
7992                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7993                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7994                                         phy_event = 1;
7995                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7996                                 phy_event = 1;
7997
7998                         if (phy_event)
7999                                 tg3_setup_phy(tp, 0);
8000                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8001                         u32 mac_stat = tr32(MAC_STATUS);
8002                         int need_setup = 0;
8003
8004                         if (netif_carrier_ok(tp->dev) &&
8005                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8006                                 need_setup = 1;
8007                         }
8008                         if (!netif_carrier_ok(tp->dev) &&
8009                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8010                                          MAC_STATUS_SIGNAL_DET))) {
8011                                 need_setup = 1;
8012                         }
8013                         if (need_setup) {
8014                                 if (!tp->serdes_counter) {
8015                                         tw32_f(MAC_MODE,
8016                                              (tp->mac_mode &
8017                                               ~MAC_MODE_PORT_MODE_MASK));
8018                                         udelay(40);
8019                                         tw32_f(MAC_MODE, tp->mac_mode);
8020                                         udelay(40);
8021                                 }
8022                                 tg3_setup_phy(tp, 0);
8023                         }
8024                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8025                         tg3_serdes_parallel_detect(tp);
8026
8027                 tp->timer_counter = tp->timer_multiplier;
8028         }
8029
8030         /* Heartbeat is only sent once every 2 seconds.
8031          *
8032          * The heartbeat is to tell the ASF firmware that the host
8033          * driver is still alive.  In the event that the OS crashes,
8034          * ASF needs to reset the hardware to free up the FIFO space
8035          * that may be filled with rx packets destined for the host.
8036          * If the FIFO is full, ASF will no longer function properly.
8037          *
8038          * Unintended resets have been reported on real-time kernels
8039          * where the timer doesn't run on time.  Netpoll will also have
8040          * the same problem.
8041          *
8042          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8043          * to check the ring condition when the heartbeat is expiring
8044          * before doing the reset.  This will prevent most unintended
8045          * resets.
8046          */
8047         if (!--tp->asf_counter) {
8048                 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8049                     !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8050                         tg3_wait_for_event_ack(tp);
8051
8052                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8053                                       FWCMD_NICDRV_ALIVE3);
8054                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8055                         /* 5 seconds timeout */
8056                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
8057
8058                         tg3_generate_fw_event(tp);
8059                 }
8060                 tp->asf_counter = tp->asf_multiplier;
8061         }
8062
8063         spin_unlock(&tp->lock);
8064
8065 restart_timer:
8066         tp->timer.expires = jiffies + tp->timer_offset;
8067         add_timer(&tp->timer);
8068 }
8069
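/* Register the interrupt handler that matches the current interrupt
 * scheme: MSI (optionally one-shot) or shared INTx, with a tagged-
 * status variant where the hardware supports it.
 */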
8070 static int tg3_request_irq(struct tg3 *tp)
8071 {
8072         irq_handler_t fn;
8073         unsigned long flags;
8074         struct net_device *dev = tp->dev;
8075
8076         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8077                 fn = tg3_msi;
8078                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8079                         fn = tg3_msi_1shot;
8080                 flags = IRQF_SAMPLE_RANDOM;
8081         } else {
8082                 fn = tg3_interrupt;
8083                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8084                         fn = tg3_interrupt_tagged;
8085                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8086         }
8087         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
8088 }
8089
8090 static int tg3_test_interrupt(struct tg3 *tp)
8091 {
8092         struct net_device *dev = tp->dev;
8093         int err, i, intr_ok = 0;
8094
8095         if (!netif_running(dev))
8096                 return -ENODEV;
8097
8098         tg3_disable_ints(tp);
8099
8100         free_irq(tp->pdev->irq, dev);
8101
8102         err = request_irq(tp->pdev->irq, tg3_test_isr,
8103                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
8104         if (err)
8105                 return err;
8106
8107         tp->hw_status->status &= ~SD_STATUS_UPDATED;
8108         tg3_enable_ints(tp);
8109
8110         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8111                HOSTCC_MODE_NOW);
8112
8113         for (i = 0; i < 5; i++) {
8114                 u32 int_mbox, misc_host_ctrl;
8115
8116                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8117                                         TG3_64BIT_REG_LOW);
8118                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8119
8120                 if ((int_mbox != 0) ||
8121                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8122                         intr_ok = 1;
8123                         break;
8124                 }
8125
8126                 msleep(10);
8127         }
8128
8129         tg3_disable_ints(tp);
8130
8131         free_irq(tp->pdev->irq, dev);
8132
8133         err = tg3_request_irq(tp);
8134
8135         if (err)
8136                 return err;
8137
8138         if (intr_ok)
8139                 return 0;
8140
8141         return -EIO;
8142 }
8143
8144 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
8145  * successfully restored.
8146  */
8147 static int tg3_test_msi(struct tg3 *tp)
8148 {
8149         struct net_device *dev = tp->dev;
8150         int err;
8151         u16 pci_cmd;
8152
8153         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8154                 return 0;
8155
8156         /* Turn off SERR reporting in case MSI terminates with Master
8157          * Abort.
8158          */
8159         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8160         pci_write_config_word(tp->pdev, PCI_COMMAND,
8161                               pci_cmd & ~PCI_COMMAND_SERR);
8162
8163         err = tg3_test_interrupt(tp);
8164
8165         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8166
8167         if (!err)
8168                 return 0;
8169
8170         /* other failures */
8171         if (err != -EIO)
8172                 return err;
8173
8174         /* MSI test failed, go back to INTx mode */
8175         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8176                "switching to INTx mode. Please report this failure to "
8177                "the PCI maintainer and include system chipset information.\n",
8178                        tp->dev->name);
8179
8180         free_irq(tp->pdev->irq, dev);
8181         pci_disable_msi(tp->pdev);
8182
8183         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8184
8185         err = tg3_request_irq(tp);
8186         if (err)
8187                 return err;
8188
8189         /* Need to reset the chip because the MSI cycle may have terminated
8190          * with Master Abort.
8191          */
8192         tg3_full_lock(tp, 1);
8193
8194         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8195         err = tg3_init_hw(tp, 1);
8196
8197         tg3_full_unlock(tp);
8198
8199         if (err)
8200                 free_irq(tp->pdev->irq, dev);
8201
8202         return err;
8203 }
8204
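/* net_device open() hook: bring the chip to D0, allocate the
 * descriptor rings, set up MSI or INTx interrupts, initialize the
 * hardware, and start the periodic timer before enabling the
 * transmit queue.
 */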
8205 static int tg3_open(struct net_device *dev)
8206 {
8207         struct tg3 *tp = netdev_priv(dev);
8208         int err;
8209
8210         netif_carrier_off(tp->dev);
8211
8212         err = tg3_set_power_state(tp, PCI_D0);
8213         if (err)
8214                 return err;
8215
8216         tg3_full_lock(tp, 0);
8217
8218         tg3_disable_ints(tp);
8219         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8220
8221         tg3_full_unlock(tp);
8222
8223         /* The placement of this call is tied
8224          * to the setup and use of Host TX descriptors.
8225          */
8226         err = tg3_alloc_consistent(tp);
8227         if (err)
8228                 return err;
8229
8230         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8231                 /* All MSI supporting chips should support tagged
8232                  * status.  Assert that this is the case.
8233                  */
8234                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8235                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8236                                "Not using MSI.\n", tp->dev->name);
8237                 } else if (pci_enable_msi(tp->pdev) == 0) {
8238                         u32 msi_mode;
8239
8240                         msi_mode = tr32(MSGINT_MODE);
8241                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8242                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8243                 }
8244         }
8245         err = tg3_request_irq(tp);
8246
8247         if (err) {
8248                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8249                         pci_disable_msi(tp->pdev);
8250                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8251                 }
8252                 tg3_free_consistent(tp);
8253                 return err;
8254         }
8255
8256         napi_enable(&tp->napi);
8257
8258         tg3_full_lock(tp, 0);
8259
8260         err = tg3_init_hw(tp, 1);
8261         if (err) {
8262                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8263                 tg3_free_rings(tp);
8264         } else {
8265                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8266                         tp->timer_offset = HZ;
8267                 else
8268                         tp->timer_offset = HZ / 10;
8269
8270                 BUG_ON(tp->timer_offset > HZ);
8271                 tp->timer_counter = tp->timer_multiplier =
8272                         (HZ / tp->timer_offset);
8273                 tp->asf_counter = tp->asf_multiplier =
8274                         ((HZ / tp->timer_offset) * 2);
8275
8276                 init_timer(&tp->timer);
8277                 tp->timer.expires = jiffies + tp->timer_offset;
8278                 tp->timer.data = (unsigned long) tp;
8279                 tp->timer.function = tg3_timer;
8280         }
8281
8282         tg3_full_unlock(tp);
8283
8284         if (err) {
8285                 napi_disable(&tp->napi);
8286                 free_irq(tp->pdev->irq, dev);
8287                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8288                         pci_disable_msi(tp->pdev);
8289                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8290                 }
8291                 tg3_free_consistent(tp);
8292                 return err;
8293         }
8294
8295         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8296                 err = tg3_test_msi(tp);
8297
8298                 if (err) {
8299                         tg3_full_lock(tp, 0);
8300
8301                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8302                                 pci_disable_msi(tp->pdev);
8303                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8304                         }
8305                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8306                         tg3_free_rings(tp);
8307                         tg3_free_consistent(tp);
8308
8309                         tg3_full_unlock(tp);
8310
8311                         napi_disable(&tp->napi);
8312
8313                         return err;
8314                 }
8315
8316                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8317                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8318                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
8319
8320                                 tw32(PCIE_TRANSACTION_CFG,
8321                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
8322                         }
8323                 }
8324         }
8325
8326         tg3_phy_start(tp);
8327
8328         tg3_full_lock(tp, 0);
8329
8330         add_timer(&tp->timer);
8331         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8332         tg3_enable_ints(tp);
8333
8334         tg3_full_unlock(tp);
8335
8336         netif_start_queue(dev);
8337
8338         return 0;
8339 }
8340
8341 #if 0
8342 /*static*/ void tg3_dump_state(struct tg3 *tp)
8343 {
8344         u32 val32, val32_2, val32_3, val32_4, val32_5;
8345         u16 val16;
8346         int i;
8347
8348         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8349         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8350         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8351                val16, val32);
8352
8353         /* MAC block */
8354         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8355                tr32(MAC_MODE), tr32(MAC_STATUS));
8356         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8357                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8358         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8359                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8360         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8361                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8362
8363         /* Send data initiator control block */
8364         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8365                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8366         printk("       SNDDATAI_STATSCTRL[%08x]\n",
8367                tr32(SNDDATAI_STATSCTRL));
8368
8369         /* Send data completion control block */
8370         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8371
8372         /* Send BD ring selector block */
8373         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8374                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8375
8376         /* Send BD initiator control block */
8377         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8378                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8379
8380         /* Send BD completion control block */
8381         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8382
8383         /* Receive list placement control block */
8384         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8385                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8386         printk("       RCVLPC_STATSCTRL[%08x]\n",
8387                tr32(RCVLPC_STATSCTRL));
8388
8389         /* Receive data and receive BD initiator control block */
8390         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8391                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8392
8393         /* Receive data completion control block */
8394         printk("DEBUG: RCVDCC_MODE[%08x]\n",
8395                tr32(RCVDCC_MODE));
8396
8397         /* Receive BD initiator control block */
8398         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8399                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8400
8401         /* Receive BD completion control block */
8402         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8403                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8404
8405         /* Receive list selector control block */
8406         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8407                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8408
8409         /* Mbuf cluster free block */
8410         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8411                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8412
8413         /* Host coalescing control block */
8414         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8415                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8416         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8417                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8418                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8419         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8420                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8421                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8422         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8423                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8424         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8425                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8426
8427         /* Memory arbiter control block */
8428         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8429                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8430
8431         /* Buffer manager control block */
8432         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8433                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8434         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8435                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8436         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8437                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8438                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8439                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8440
8441         /* Read DMA control block */
8442         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8443                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8444
8445         /* Write DMA control block */
8446         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8447                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8448
8449         /* DMA completion block */
8450         printk("DEBUG: DMAC_MODE[%08x]\n",
8451                tr32(DMAC_MODE));
8452
8453         /* GRC block */
8454         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8455                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8456         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8457                tr32(GRC_LOCAL_CTRL));
8458
8459         /* TG3_BDINFOs */
8460         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8461                tr32(RCVDBDI_JUMBO_BD + 0x0),
8462                tr32(RCVDBDI_JUMBO_BD + 0x4),
8463                tr32(RCVDBDI_JUMBO_BD + 0x8),
8464                tr32(RCVDBDI_JUMBO_BD + 0xc));
8465         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8466                tr32(RCVDBDI_STD_BD + 0x0),
8467                tr32(RCVDBDI_STD_BD + 0x4),
8468                tr32(RCVDBDI_STD_BD + 0x8),
8469                tr32(RCVDBDI_STD_BD + 0xc));
8470         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8471                tr32(RCVDBDI_MINI_BD + 0x0),
8472                tr32(RCVDBDI_MINI_BD + 0x4),
8473                tr32(RCVDBDI_MINI_BD + 0x8),
8474                tr32(RCVDBDI_MINI_BD + 0xc));
8475
8476         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8477         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8478         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8479         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8480         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8481                val32, val32_2, val32_3, val32_4);
8482
8483         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8484         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8485         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8486         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8487         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8488                val32, val32_2, val32_3, val32_4);
8489
8490         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8491         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8492         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8493         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8494         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8495         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8496                val32, val32_2, val32_3, val32_4, val32_5);
8497
8498         /* SW status block */
8499         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8500                tp->hw_status->status,
8501                tp->hw_status->status_tag,
8502                tp->hw_status->rx_jumbo_consumer,
8503                tp->hw_status->rx_consumer,
8504                tp->hw_status->rx_mini_consumer,
8505                tp->hw_status->idx[0].rx_producer,
8506                tp->hw_status->idx[0].tx_consumer);
8507
8508         /* SW statistics block */
8509         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8510                ((u32 *)tp->hw_stats)[0],
8511                ((u32 *)tp->hw_stats)[1],
8512                ((u32 *)tp->hw_stats)[2],
8513                ((u32 *)tp->hw_stats)[3]);
8514
8515         /* Mailboxes */
8516         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8517                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8518                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8519                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8520                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8521
8522         /* NIC side send descriptors. */
8523         for (i = 0; i < 6; i++) {
8524                 unsigned long txd;
8525
8526                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8527                         + (i * sizeof(struct tg3_tx_buffer_desc));
8528                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8529                        i,
8530                        readl(txd + 0x0), readl(txd + 0x4),
8531                        readl(txd + 0x8), readl(txd + 0xc));
8532         }
8533
8534         /* NIC side RX descriptors. */
8535         for (i = 0; i < 6; i++) {
8536                 unsigned long rxd;
8537
8538                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8539                         + (i * sizeof(struct tg3_rx_buffer_desc));
8540                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8541                        i,
8542                        readl(rxd + 0x0), readl(rxd + 0x4),
8543                        readl(rxd + 0x8), readl(rxd + 0xc));
8544                 rxd += (4 * sizeof(u32));
8545                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8546                        i,
8547                        readl(rxd + 0x0), readl(rxd + 0x4),
8548                        readl(rxd + 0x8), readl(rxd + 0xc));
8549         }
8550
8551         for (i = 0; i < 6; i++) {
8552                 unsigned long rxd;
8553
8554                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8555                         + (i * sizeof(struct tg3_rx_buffer_desc));
8556                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8557                        i,
8558                        readl(rxd + 0x0), readl(rxd + 0x4),
8559                        readl(rxd + 0x8), readl(rxd + 0xc));
8560                 rxd += (4 * sizeof(u32));
8561                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8562                        i,
8563                        readl(rxd + 0x0), readl(rxd + 0x4),
8564                        readl(rxd + 0x8), readl(rxd + 0xc));
8565         }
8566 }
8567 #endif
8568
8569 static struct net_device_stats *tg3_get_stats(struct net_device *);
8570 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8571
8572 static int tg3_close(struct net_device *dev)
8573 {
8574         struct tg3 *tp = netdev_priv(dev);
8575
8576         napi_disable(&tp->napi);
8577         cancel_work_sync(&tp->reset_task);
8578
8579         netif_stop_queue(dev);
8580
8581         del_timer_sync(&tp->timer);
8582
8583         tg3_full_lock(tp, 1);
8584 #if 0
8585         tg3_dump_state(tp);
8586 #endif
8587
8588         tg3_disable_ints(tp);
8589
8590         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8591         tg3_free_rings(tp);
8592         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8593
8594         tg3_full_unlock(tp);
8595
8596         free_irq(tp->pdev->irq, dev);
8597         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8598                 pci_disable_msi(tp->pdev);
8599                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8600         }
8601
8602         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8603                sizeof(tp->net_stats_prev));
8604         memcpy(&tp->estats_prev, tg3_get_estats(tp),
8605                sizeof(tp->estats_prev));
8606
8607         tg3_free_consistent(tp);
8608
8609         tg3_set_power_state(tp, PCI_D3hot);
8610
8611         netif_carrier_off(tp->dev);
8612
8613         return 0;
8614 }
8615
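/* Return a 64-bit hardware statistic as an unsigned long.  On 32-bit
 * kernels only the low word fits, so the high word is dropped.
 */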
8616 static inline unsigned long get_stat64(tg3_stat64_t *val)
8617 {
8618         unsigned long ret;
8619
8620 #if (BITS_PER_LONG == 32)
8621         ret = val->low;
8622 #else
8623         ret = ((u64)val->high << 32) | ((u64)val->low);
8624 #endif
8625         return ret;
8626 }
8627
8628 static inline u64 get_estat64(tg3_stat64_t *val)
8629 {
8630        return ((u64)val->high << 32) | ((u64)val->low);
8631 }
8632
8633 static unsigned long calc_crc_errors(struct tg3 *tp)
8634 {
8635         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8636
8637         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8638             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8639              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8640                 u32 val;
8641
8642                 spin_lock_bh(&tp->lock);
8643                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8644                         tg3_writephy(tp, MII_TG3_TEST1,
8645                                      val | MII_TG3_TEST1_CRC_EN);
8646                         tg3_readphy(tp, 0x14, &val);
8647                 } else
8648                         val = 0;
8649                 spin_unlock_bh(&tp->lock);
8650
8651                 tp->phy_crc_errors += val;
8652
8653                 return tp->phy_crc_errors;
8654         }
8655
8656         return get_stat64(&hw_stats->rx_fcs_errors);
8657 }
8658
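/* Each ethtool statistic is the snapshot saved at the last close
 * plus the current 64-bit hardware counter.  For example,
 * ESTAT_ADD(rx_octets) expands to:
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_estat64(&hw_stats->rx_octets);
 */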
8659 #define ESTAT_ADD(member) \
8660         estats->member =        old_estats->member + \
8661                                 get_estat64(&hw_stats->member)
8662
8663 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8664 {
8665         struct tg3_ethtool_stats *estats = &tp->estats;
8666         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8667         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8668
8669         if (!hw_stats)
8670                 return old_estats;
8671
8672         ESTAT_ADD(rx_octets);
8673         ESTAT_ADD(rx_fragments);
8674         ESTAT_ADD(rx_ucast_packets);
8675         ESTAT_ADD(rx_mcast_packets);
8676         ESTAT_ADD(rx_bcast_packets);
8677         ESTAT_ADD(rx_fcs_errors);
8678         ESTAT_ADD(rx_align_errors);
8679         ESTAT_ADD(rx_xon_pause_rcvd);
8680         ESTAT_ADD(rx_xoff_pause_rcvd);
8681         ESTAT_ADD(rx_mac_ctrl_rcvd);
8682         ESTAT_ADD(rx_xoff_entered);
8683         ESTAT_ADD(rx_frame_too_long_errors);
8684         ESTAT_ADD(rx_jabbers);
8685         ESTAT_ADD(rx_undersize_packets);
8686         ESTAT_ADD(rx_in_length_errors);
8687         ESTAT_ADD(rx_out_length_errors);
8688         ESTAT_ADD(rx_64_or_less_octet_packets);
8689         ESTAT_ADD(rx_65_to_127_octet_packets);
8690         ESTAT_ADD(rx_128_to_255_octet_packets);
8691         ESTAT_ADD(rx_256_to_511_octet_packets);
8692         ESTAT_ADD(rx_512_to_1023_octet_packets);
8693         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8694         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8695         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8696         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8697         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8698
8699         ESTAT_ADD(tx_octets);
8700         ESTAT_ADD(tx_collisions);
8701         ESTAT_ADD(tx_xon_sent);
8702         ESTAT_ADD(tx_xoff_sent);
8703         ESTAT_ADD(tx_flow_control);
8704         ESTAT_ADD(tx_mac_errors);
8705         ESTAT_ADD(tx_single_collisions);
8706         ESTAT_ADD(tx_mult_collisions);
8707         ESTAT_ADD(tx_deferred);
8708         ESTAT_ADD(tx_excessive_collisions);
8709         ESTAT_ADD(tx_late_collisions);
8710         ESTAT_ADD(tx_collide_2times);
8711         ESTAT_ADD(tx_collide_3times);
8712         ESTAT_ADD(tx_collide_4times);
8713         ESTAT_ADD(tx_collide_5times);
8714         ESTAT_ADD(tx_collide_6times);
8715         ESTAT_ADD(tx_collide_7times);
8716         ESTAT_ADD(tx_collide_8times);
8717         ESTAT_ADD(tx_collide_9times);
8718         ESTAT_ADD(tx_collide_10times);
8719         ESTAT_ADD(tx_collide_11times);
8720         ESTAT_ADD(tx_collide_12times);
8721         ESTAT_ADD(tx_collide_13times);
8722         ESTAT_ADD(tx_collide_14times);
8723         ESTAT_ADD(tx_collide_15times);
8724         ESTAT_ADD(tx_ucast_packets);
8725         ESTAT_ADD(tx_mcast_packets);
8726         ESTAT_ADD(tx_bcast_packets);
8727         ESTAT_ADD(tx_carrier_sense_errors);
8728         ESTAT_ADD(tx_discards);
8729         ESTAT_ADD(tx_errors);
8730
8731         ESTAT_ADD(dma_writeq_full);
8732         ESTAT_ADD(dma_write_prioq_full);
8733         ESTAT_ADD(rxbds_empty);
8734         ESTAT_ADD(rx_discards);
8735         ESTAT_ADD(rx_errors);
8736         ESTAT_ADD(rx_threshold_hit);
8737
8738         ESTAT_ADD(dma_readq_full);
8739         ESTAT_ADD(dma_read_prioq_full);
8740         ESTAT_ADD(tx_comp_queue_full);
8741
8742         ESTAT_ADD(ring_set_send_prod_index);
8743         ESTAT_ADD(ring_status_update);
8744         ESTAT_ADD(nic_irqs);
8745         ESTAT_ADD(nic_avoided_irqs);
8746         ESTAT_ADD(nic_tx_threshold_hit);
8747
8748         return estats;
8749 }
8750
8751 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8752 {
8753         struct tg3 *tp = netdev_priv(dev);
8754         struct net_device_stats *stats = &tp->net_stats;
8755         struct net_device_stats *old_stats = &tp->net_stats_prev;
8756         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8757
8758         if (!hw_stats)
8759                 return old_stats;
8760
8761         stats->rx_packets = old_stats->rx_packets +
8762                 get_stat64(&hw_stats->rx_ucast_packets) +
8763                 get_stat64(&hw_stats->rx_mcast_packets) +
8764                 get_stat64(&hw_stats->rx_bcast_packets);
8765
8766         stats->tx_packets = old_stats->tx_packets +
8767                 get_stat64(&hw_stats->tx_ucast_packets) +
8768                 get_stat64(&hw_stats->tx_mcast_packets) +
8769                 get_stat64(&hw_stats->tx_bcast_packets);
8770
8771         stats->rx_bytes = old_stats->rx_bytes +
8772                 get_stat64(&hw_stats->rx_octets);
8773         stats->tx_bytes = old_stats->tx_bytes +
8774                 get_stat64(&hw_stats->tx_octets);
8775
8776         stats->rx_errors = old_stats->rx_errors +
8777                 get_stat64(&hw_stats->rx_errors);
8778         stats->tx_errors = old_stats->tx_errors +
8779                 get_stat64(&hw_stats->tx_errors) +
8780                 get_stat64(&hw_stats->tx_mac_errors) +
8781                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8782                 get_stat64(&hw_stats->tx_discards);
8783
8784         stats->multicast = old_stats->multicast +
8785                 get_stat64(&hw_stats->rx_mcast_packets);
8786         stats->collisions = old_stats->collisions +
8787                 get_stat64(&hw_stats->tx_collisions);
8788
8789         stats->rx_length_errors = old_stats->rx_length_errors +
8790                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8791                 get_stat64(&hw_stats->rx_undersize_packets);
8792
8793         stats->rx_over_errors = old_stats->rx_over_errors +
8794                 get_stat64(&hw_stats->rxbds_empty);
8795         stats->rx_frame_errors = old_stats->rx_frame_errors +
8796                 get_stat64(&hw_stats->rx_align_errors);
8797         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8798                 get_stat64(&hw_stats->tx_discards);
8799         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8800                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8801
8802         stats->rx_crc_errors = old_stats->rx_crc_errors +
8803                 calc_crc_errors(tp);
8804
8805         stats->rx_missed_errors = old_stats->rx_missed_errors +
8806                 get_stat64(&hw_stats->rx_discards);
8807
8808         return stats;
8809 }
8810
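/* Bit-wise CRC-32 over the buffer (Ethernet polynomial, reflected
 * form 0xedb88320).  Used below to hash multicast addresses into
 * the MAC hash filter registers.
 */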
8811 static inline u32 calc_crc(unsigned char *buf, int len)
8812 {
8813         u32 reg;
8814         u32 tmp;
8815         int j, k;
8816
8817         reg = 0xffffffff;
8818
8819         for (j = 0; j < len; j++) {
8820                 reg ^= buf[j];
8821
8822                 for (k = 0; k < 8; k++) {
8823                         tmp = reg & 0x01;
8824
8825                         reg >>= 1;
8826
8827                         if (tmp) {
8828                                 reg ^= 0xedb88320;
8829                         }
8830                 }
8831         }
8832
8833         return ~reg;
8834 }
8835
8836 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8837 {
8838         /* accept or reject all multicast frames */
8839         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8840         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8841         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8842         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8843 }
8844
8845 static void __tg3_set_rx_mode(struct net_device *dev)
8846 {
8847         struct tg3 *tp = netdev_priv(dev);
8848         u32 rx_mode;
8849
8850         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8851                                   RX_MODE_KEEP_VLAN_TAG);
8852
8853         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8854          * flag clear.
8855          */
8856 #if TG3_VLAN_TAG_USED
8857         if (!tp->vlgrp &&
8858             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8859                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8860 #else
8861         /* By definition, VLAN is always disabled in this
8862          * case.
8863          */
8864         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8865                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8866 #endif
8867
8868         if (dev->flags & IFF_PROMISC) {
8869                 /* Promiscuous mode. */
8870                 rx_mode |= RX_MODE_PROMISC;
8871         } else if (dev->flags & IFF_ALLMULTI) {
8872                 /* Accept all multicast. */
8873                 tg3_set_multi(tp, 1);
8874         } else if (dev->mc_count < 1) {
8875                 /* Reject all multicast. */
8876                 tg3_set_multi(tp, 0);
8877         } else {
8878                 /* Accept one or more multicast(s). */
8879                 struct dev_mc_list *mclist;
8880                 unsigned int i;
8881                 u32 mc_filter[4] = { 0, };
8882                 u32 regidx;
8883                 u32 bit;
8884                 u32 crc;
8885
8886                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8887                      i++, mclist = mclist->next) {
8888
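                        /* The filter bit comes from the inverted low
                         * 7 bits of the CRC: bits 6:5 pick one of the
                         * four hash registers, bits 4:0 the bit
                         * within it.
                         */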
8889                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8890                         bit = ~crc & 0x7f;
8891                         regidx = (bit & 0x60) >> 5;
8892                         bit &= 0x1f;
8893                         mc_filter[regidx] |= (1 << bit);
8894                 }
8895
8896                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8897                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8898                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8899                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8900         }
8901
8902         if (rx_mode != tp->rx_mode) {
8903                 tp->rx_mode = rx_mode;
8904                 tw32_f(MAC_RX_MODE, rx_mode);
8905                 udelay(10);
8906         }
8907 }
8908
8909 static void tg3_set_rx_mode(struct net_device *dev)
8910 {
8911         struct tg3 *tp = netdev_priv(dev);
8912
8913         if (!netif_running(dev))
8914                 return;
8915
8916         tg3_full_lock(tp, 0);
8917         __tg3_set_rx_mode(dev);
8918         tg3_full_unlock(tp);
8919 }
8920
8921 #define TG3_REGDUMP_LEN         (32 * 1024)
8922
8923 static int tg3_get_regs_len(struct net_device *dev)
8924 {
8925         return TG3_REGDUMP_LEN;
8926 }
8927
8928 static void tg3_get_regs(struct net_device *dev,
8929                 struct ethtool_regs *regs, void *_p)
8930 {
8931         u32 *p = _p;
8932         struct tg3 *tp = netdev_priv(dev);
8933         u8 *orig_p = _p;
8934         int i;
8935
8936         regs->version = 0;
8937
8938         memset(p, 0, TG3_REGDUMP_LEN);
8939
8940         if (tp->link_config.phy_is_low_power)
8941                 return;
8942
8943         tg3_full_lock(tp, 0);
8944
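        /* The dump mirrors the register layout: each helper seeks to
         * the register's offset within the output buffer before
         * copying, so ranges that are not read stay zero-filled from
         * the memset above.
         */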
8945 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8946 #define GET_REG32_LOOP(base,len)                \
8947 do {    p = (u32 *)(orig_p + (base));           \
8948         for (i = 0; i < len; i += 4)            \
8949                 __GET_REG32((base) + i);        \
8950 } while (0)
8951 #define GET_REG32_1(reg)                        \
8952 do {    p = (u32 *)(orig_p + (reg));            \
8953         __GET_REG32((reg));                     \
8954 } while (0)
8955
8956         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8957         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8958         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8959         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8960         GET_REG32_1(SNDDATAC_MODE);
8961         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8962         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8963         GET_REG32_1(SNDBDC_MODE);
8964         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8965         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8966         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8967         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8968         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8969         GET_REG32_1(RCVDCC_MODE);
8970         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8971         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8972         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8973         GET_REG32_1(MBFREE_MODE);
8974         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8975         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8976         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8977         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8978         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8979         GET_REG32_1(RX_CPU_MODE);
8980         GET_REG32_1(RX_CPU_STATE);
8981         GET_REG32_1(RX_CPU_PGMCTR);
8982         GET_REG32_1(RX_CPU_HWBKPT);
8983         GET_REG32_1(TX_CPU_MODE);
8984         GET_REG32_1(TX_CPU_STATE);
8985         GET_REG32_1(TX_CPU_PGMCTR);
8986         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8987         GET_REG32_LOOP(FTQ_RESET, 0x120);
8988         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8989         GET_REG32_1(DMAC_MODE);
8990         GET_REG32_LOOP(GRC_MODE, 0x4c);
8991         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8992                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8993
8994 #undef __GET_REG32
8995 #undef GET_REG32_LOOP
8996 #undef GET_REG32_1
8997
8998         tg3_full_unlock(tp);
8999 }
9000
9001 static int tg3_get_eeprom_len(struct net_device *dev)
9002 {
9003         struct tg3 *tp = netdev_priv(dev);
9004
9005         return tp->nvram_size;
9006 }
9007
9008 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
9009 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
9010 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
9011
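/* ethtool EEPROM read.  NVRAM is accessed one 32-bit word at a time,
 * so an unaligned request is split into a partial leading word, a run
 * of whole words, and a partial trailing word.
 */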
9012 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9013 {
9014         struct tg3 *tp = netdev_priv(dev);
9015         int ret;
9016         u8  *pd;
9017         u32 i, offset, len, b_offset, b_count;
9018         __le32 val;
9019
9020         if (tp->link_config.phy_is_low_power)
9021                 return -EAGAIN;
9022
9023         offset = eeprom->offset;
9024         len = eeprom->len;
9025         eeprom->len = 0;
9026
9027         eeprom->magic = TG3_EEPROM_MAGIC;
9028
9029         if (offset & 3) {
9030                 /* adjustments to start on required 4 byte boundary */
9031                 b_offset = offset & 3;
9032                 b_count = 4 - b_offset;
9033                 if (b_count > len) {
9034                         /* i.e. offset=1 len=2 */
9035                         b_count = len;
9036                 }
9037                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
9038                 if (ret)
9039                         return ret;
9040                 memcpy(data, ((char*)&val) + b_offset, b_count);
9041                 len -= b_count;
9042                 offset += b_count;
9043                 eeprom->len += b_count;
9044         }
9045
9046         /* read bytes up to the last 4 byte boundary */
9047         pd = &data[eeprom->len];
9048         for (i = 0; i < (len - (len & 3)); i += 4) {
9049                 ret = tg3_nvram_read_le(tp, offset + i, &val);
9050                 if (ret) {
9051                         eeprom->len += i;
9052                         return ret;
9053                 }
9054                 memcpy(pd + i, &val, 4);
9055         }
9056         eeprom->len += i;
9057
9058         if (len & 3) {
9059                 /* read last bytes not ending on 4 byte boundary */
9060                 pd = &data[eeprom->len];
9061                 b_count = len & 3;
9062                 b_offset = offset + len - b_count;
9063                 ret = tg3_nvram_read_le(tp, b_offset, &val);
9064                 if (ret)
9065                         return ret;
9066                 memcpy(pd, &val, b_count);
9067                 eeprom->len += b_count;
9068         }
9069         return 0;
9070 }
9071
9072 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9073
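/* ethtool EEPROM write (e.g. "ethtool -E").  The NVRAM block write
 * operates on 4-byte aligned data, so unaligned head/tail bytes are
 * merged with the existing boundary words ('start'/'end' read back from
 * NVRAM) into a temporary buffer before the write is issued.
 */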
9074 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9075 {
9076         struct tg3 *tp = netdev_priv(dev);
9077         int ret;
9078         u32 offset, len, b_offset, odd_len;
9079         u8 *buf;
9080         __le32 start, end;
9081
9082         if (tp->link_config.phy_is_low_power)
9083                 return -EAGAIN;
9084
9085         if (eeprom->magic != TG3_EEPROM_MAGIC)
9086                 return -EINVAL;
9087
9088         offset = eeprom->offset;
9089         len = eeprom->len;
9090
9091         if ((b_offset = (offset & 3))) {
9092                 /* adjustments to start on required 4 byte boundary */
9093                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
9094                 if (ret)
9095                         return ret;
9096                 len += b_offset;
9097                 offset &= ~3;
9098                 if (len < 4)
9099                         len = 4;
9100         }
9101
9102         odd_len = 0;
9103         if (len & 3) {
9104                 /* adjustments to end on required 4 byte boundary */
9105                 odd_len = 1;
9106                 len = (len + 3) & ~3;
9107                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
9108                 if (ret)
9109                         return ret;
9110         }
9111
9112         buf = data;
9113         if (b_offset || odd_len) {
9114                 buf = kmalloc(len, GFP_KERNEL);
9115                 if (!buf)
9116                         return -ENOMEM;
9117                 if (b_offset)
9118                         memcpy(buf, &start, 4);
9119                 if (odd_len)
9120                         memcpy(buf+len-4, &end, 4);
9121                 memcpy(buf + b_offset, data, eeprom->len);
9122         }
9123
9124         ret = tg3_nvram_write_block(tp, offset, len, buf);
9125
9126         if (buf != data)
9127                 kfree(buf);
9128
9129         return ret;
9130 }
9131
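/* Report the current link settings ("ethtool ethX").  When the PHY is
 * managed by phylib (TG3_FLG3_USE_PHYLIB) the request is forwarded to
 * the attached phy_device; otherwise the reply is built from the
 * driver's own link_config state.
 */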
9132 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9133 {
9134         struct tg3 *tp = netdev_priv(dev);
9135
9136         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9137                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9138                         return -EAGAIN;
9139                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9140         }
9141
9142         cmd->supported = (SUPPORTED_Autoneg);
9143
9144         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9145                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9146                                    SUPPORTED_1000baseT_Full);
9147
9148         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9149                 cmd->supported |= (SUPPORTED_100baseT_Half |
9150                                   SUPPORTED_100baseT_Full |
9151                                   SUPPORTED_10baseT_Half |
9152                                   SUPPORTED_10baseT_Full |
9153                                   SUPPORTED_TP);
9154                 cmd->port = PORT_TP;
9155         } else {
9156                 cmd->supported |= SUPPORTED_FIBRE;
9157                 cmd->port = PORT_FIBRE;
9158         }
9159
9160         cmd->advertising = tp->link_config.advertising;
9161         if (netif_running(dev)) {
9162                 cmd->speed = tp->link_config.active_speed;
9163                 cmd->duplex = tp->link_config.active_duplex;
9164         }
9165         cmd->phy_address = PHY_ADDR;
9166         cmd->transceiver = 0;
9167         cmd->autoneg = tp->link_config.autoneg;
9168         cmd->maxtxpkt = 0;
9169         cmd->maxrxpkt = 0;
9170         return 0;
9171 }
9172
9173 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9174 {
9175         struct tg3 *tp = netdev_priv(dev);
9176
9177         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9178                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9179                         return -EAGAIN;
9180                 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9181         }
9182
9183         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9184                 /* These are the only valid advertisement bits allowed.  */
9185                 if (cmd->autoneg == AUTONEG_ENABLE &&
9186                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9187                                           ADVERTISED_1000baseT_Full |
9188                                           ADVERTISED_Autoneg |
9189                                           ADVERTISED_FIBRE)))
9190                         return -EINVAL;
9191                 /* Fiber can only do SPEED_1000.  */
9192                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9193                          (cmd->speed != SPEED_1000))
9194                         return -EINVAL;
9195         /* Copper cannot force SPEED_1000.  */
9196         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9197                    (cmd->speed == SPEED_1000))
9198                 return -EINVAL;
9199         else if ((cmd->speed == SPEED_1000) &&
9200                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9201                 return -EINVAL;
9202
9203         tg3_full_lock(tp, 0);
9204
9205         tp->link_config.autoneg = cmd->autoneg;
9206         if (cmd->autoneg == AUTONEG_ENABLE) {
9207                 tp->link_config.advertising = (cmd->advertising |
9208                                               ADVERTISED_Autoneg);
9209                 tp->link_config.speed = SPEED_INVALID;
9210                 tp->link_config.duplex = DUPLEX_INVALID;
9211         } else {
9212                 tp->link_config.advertising = 0;
9213                 tp->link_config.speed = cmd->speed;
9214                 tp->link_config.duplex = cmd->duplex;
9215         }
9216
9217         tp->link_config.orig_speed = tp->link_config.speed;
9218         tp->link_config.orig_duplex = tp->link_config.duplex;
9219         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9220
9221         if (netif_running(dev))
9222                 tg3_setup_phy(tp, 1);
9223
9224         tg3_full_unlock(tp);
9225
9226         return 0;
9227 }
9228
9229 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9230 {
9231         struct tg3 *tp = netdev_priv(dev);
9232
9233         strcpy(info->driver, DRV_MODULE_NAME);
9234         strcpy(info->version, DRV_MODULE_VERSION);
9235         strcpy(info->fw_version, tp->fw_ver);
9236         strcpy(info->bus_info, pci_name(tp->pdev));
9237 }
9238
9239 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9240 {
9241         struct tg3 *tp = netdev_priv(dev);
9242
9243         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9244             device_can_wakeup(&tp->pdev->dev))
9245                 wol->supported = WAKE_MAGIC;
9246         else
9247                 wol->supported = 0;
9248         wol->wolopts = 0;
9249         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9250             device_can_wakeup(&tp->pdev->dev))
9251                 wol->wolopts = WAKE_MAGIC;
9252         memset(&wol->sopass, 0, sizeof(wol->sopass));
9253 }
9254
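/* Configure Wake-on-LAN (e.g. "ethtool -s ethX wol g").  Only
 * magic-packet wake is supported, and only on WoL-capable devices; the
 * driver flag and the PCI device wakeup state are updated together
 * under tp->lock.
 */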
9255 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9256 {
9257         struct tg3 *tp = netdev_priv(dev);
9258         struct device *dp = &tp->pdev->dev;
9259
9260         if (wol->wolopts & ~WAKE_MAGIC)
9261                 return -EINVAL;
9262         if ((wol->wolopts & WAKE_MAGIC) &&
9263             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9264                 return -EINVAL;
9265
9266         spin_lock_bh(&tp->lock);
9267         if (wol->wolopts & WAKE_MAGIC) {
9268                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9269                 device_set_wakeup_enable(dp, true);
9270         } else {
9271                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9272                 device_set_wakeup_enable(dp, false);
9273         }
9274         spin_unlock_bh(&tp->lock);
9275
9276         return 0;
9277 }
9278
9279 static u32 tg3_get_msglevel(struct net_device *dev)
9280 {
9281         struct tg3 *tp = netdev_priv(dev);
9282         return tp->msg_enable;
9283 }
9284
9285 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9286 {
9287         struct tg3 *tp = netdev_priv(dev);
9288         tp->msg_enable = value;
9289 }
9290
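/* Toggle TSO (e.g. "ethtool -K ethX tso on").  Enabling is refused on
 * non-TSO-capable chips; on HW_TSO_2 parts other than the 5906, IPv6
 * TSO (and TSO_ECN on the newest chips) is switched along with it.
 */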
9291 static int tg3_set_tso(struct net_device *dev, u32 value)
9292 {
9293         struct tg3 *tp = netdev_priv(dev);
9294
9295         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9296                 if (value)
9297                         return -EINVAL;
9298                 return 0;
9299         }
9300         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9301             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9302                 if (value) {
9303                         dev->features |= NETIF_F_TSO6;
9304                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9305                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9306                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9307                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9308                                 dev->features |= NETIF_F_TSO_ECN;
9309                 } else
9310                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9311         }
9312         return ethtool_op_set_tso(dev, value);
9313 }
9314
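/* Restart autonegotiation ("ethtool -r").  Requires a running
 * interface; PHY_SERDES devices are rejected, and in the non-phylib
 * path the restart only happens if autoneg or parallel detection is
 * currently enabled.
 */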
9315 static int tg3_nway_reset(struct net_device *dev)
9316 {
9317         struct tg3 *tp = netdev_priv(dev);
9318         int r;
9319
9320         if (!netif_running(dev))
9321                 return -EAGAIN;
9322
9323         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9324                 return -EINVAL;
9325
9326         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9327                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9328                         return -EAGAIN;
9329                 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9330         } else {
9331                 u32 bmcr;
9332
9333                 spin_lock_bh(&tp->lock);
9334                 r = -EINVAL;
9335                 tg3_readphy(tp, MII_BMCR, &bmcr);
9336                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9337                     ((bmcr & BMCR_ANENABLE) ||
9338                      (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9339                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9340                                                    BMCR_ANENABLE);
9341                         r = 0;
9342                 }
9343                 spin_unlock_bh(&tp->lock);
9344         }
9345
9346         return r;
9347 }
9348
9349 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9350 {
9351         struct tg3 *tp = netdev_priv(dev);
9352
9353         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9354         ering->rx_mini_max_pending = 0;
9355         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9356                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9357         else
9358                 ering->rx_jumbo_max_pending = 0;
9359
9360         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9361
9362         ering->rx_pending = tp->rx_pending;
9363         ering->rx_mini_pending = 0;
9364         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9365                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9366         else
9367                 ering->rx_jumbo_pending = 0;
9368
9369         ering->tx_pending = tp->tx_pending;
9370 }
9371
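/* Resize the RX/TX rings ("ethtool -G").  The requested counts are
 * bounds-checked against the fixed hardware ring sizes, then the chip
 * is halted and restarted so the new pending counts take effect.
 */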
9372 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9373 {
9374         struct tg3 *tp = netdev_priv(dev);
9375         int irq_sync = 0, err = 0;
9376
9377         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9378             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9379             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9380             (ering->tx_pending <= MAX_SKB_FRAGS) ||
9381             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9382              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9383                 return -EINVAL;
9384
9385         if (netif_running(dev)) {
9386                 tg3_phy_stop(tp);
9387                 tg3_netif_stop(tp);
9388                 irq_sync = 1;
9389         }
9390
9391         tg3_full_lock(tp, irq_sync);
9392
9393         tp->rx_pending = ering->rx_pending;
9394
9395         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9396             tp->rx_pending > 63)
9397                 tp->rx_pending = 63;
9398         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9399         tp->tx_pending = ering->tx_pending;
9400
9401         if (netif_running(dev)) {
9402                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9403                 err = tg3_restart_hw(tp, 1);
9404                 if (!err)
9405                         tg3_netif_start(tp);
9406         }
9407
9408         tg3_full_unlock(tp);
9409
9410         if (irq_sync && !err)
9411                 tg3_phy_start(tp);
9412
9413         return err;
9414 }
9415
9416 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9417 {
9418         struct tg3 *tp = netdev_priv(dev);
9419
9420         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9421
9422         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9423                 epause->rx_pause = 1;
9424         else
9425                 epause->rx_pause = 0;
9426
9427         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9428                 epause->tx_pause = 1;
9429         else
9430                 epause->tx_pause = 0;
9431 }
9432
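/* Configure flow control ("ethtool -A").  With phylib and autoneg
 * enabled, the request becomes Pause/Asym_Pause advertisement bits on
 * the phy_device; in the legacy path the driver updates
 * link_config.flowctrl and restarts the hardware if the interface is up.
 */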
9433 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9434 {
9435         struct tg3 *tp = netdev_priv(dev);
9436         int err = 0;
9437
9438         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9439                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9440                         return -EAGAIN;
9441
9442                 if (epause->autoneg) {
9443                         u32 newadv;
9444                         struct phy_device *phydev;
9445
9446                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9447
9448                         if (epause->rx_pause) {
9449                                 if (epause->tx_pause)
9450                                         newadv = ADVERTISED_Pause;
9451                                 else
9452                                         newadv = ADVERTISED_Pause |
9453                                                  ADVERTISED_Asym_Pause;
9454                         } else if (epause->tx_pause) {
9455                                 newadv = ADVERTISED_Asym_Pause;
9456                         } else
9457                                 newadv = 0;
9458
9459                         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9460                                 u32 oldadv = phydev->advertising &
9461                                              (ADVERTISED_Pause |
9462                                               ADVERTISED_Asym_Pause);
9463                                 if (oldadv != newadv) {
9464                                         phydev->advertising &=
9465                                                 ~(ADVERTISED_Pause |
9466                                                   ADVERTISED_Asym_Pause);
9467                                         phydev->advertising |= newadv;
9468                                         err = phy_start_aneg(phydev);
9469                                 }
9470                         } else {
9471                                 tp->link_config.advertising &=
9472                                                 ~(ADVERTISED_Pause |
9473                                                   ADVERTISED_Asym_Pause);
9474                                 tp->link_config.advertising |= newadv;
9475                         }
9476                 } else {
9477                         if (epause->rx_pause)
9478                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9479                         else
9480                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9481
9482                         if (epause->tx_pause)
9483                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9484                         else
9485                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9486
9487                         if (netif_running(dev))
9488                                 tg3_setup_flow_control(tp, 0, 0);
9489                 }
9490         } else {
9491                 int irq_sync = 0;
9492
9493                 if (netif_running(dev)) {
9494                         tg3_netif_stop(tp);
9495                         irq_sync = 1;
9496                 }
9497
9498                 tg3_full_lock(tp, irq_sync);
9499
9500                 if (epause->autoneg)
9501                         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9502                 else
9503                         tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9504                 if (epause->rx_pause)
9505                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9506                 else
9507                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9508                 if (epause->tx_pause)
9509                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9510                 else
9511                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9512
9513                 if (netif_running(dev)) {
9514                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9515                         err = tg3_restart_hw(tp, 1);
9516                         if (!err)
9517                                 tg3_netif_start(tp);
9518                 }
9519
9520                 tg3_full_unlock(tp);
9521         }
9522
9523         return err;
9524 }
9525
9526 static u32 tg3_get_rx_csum(struct net_device *dev)
9527 {
9528         struct tg3 *tp = netdev_priv(dev);
9529         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9530 }
9531
9532 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9533 {
9534         struct tg3 *tp = netdev_priv(dev);
9535
9536         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9537                 if (data != 0)
9538                         return -EINVAL;
9539                 return 0;
9540         }
9541
9542         spin_lock_bh(&tp->lock);
9543         if (data)
9544                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9545         else
9546                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9547         spin_unlock_bh(&tp->lock);
9548
9549         return 0;
9550 }
9551
9552 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9553 {
9554         struct tg3 *tp = netdev_priv(dev);
9555
9556         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9557                 if (data != 0)
9558                         return -EINVAL;
9559                 return 0;
9560         }
9561
9562         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9563             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9566             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9567                 ethtool_op_set_tx_ipv6_csum(dev, data);
9568         else
9569                 ethtool_op_set_tx_csum(dev, data);
9570
9571         return 0;
9572 }
9573
9574 static int tg3_get_sset_count (struct net_device *dev, int sset)
9575 {
9576         switch (sset) {
9577         case ETH_SS_TEST:
9578                 return TG3_NUM_TEST;
9579         case ETH_SS_STATS:
9580                 return TG3_NUM_STATS;
9581         default:
9582                 return -EOPNOTSUPP;
9583         }
9584 }
9585
9586 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9587 {
9588         switch (stringset) {
9589         case ETH_SS_STATS:
9590                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9591                 break;
9592         case ETH_SS_TEST:
9593                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9594                 break;
9595         default:
9596                 WARN_ON(1);     /* we need a WARN() */
9597                 break;
9598         }
9599 }
9600
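/* Identify the adapter ("ethtool -p ethX [seconds]") by toggling the
 * LEDs between all-on and all-off every 500 ms for the requested number
 * of seconds (0 means effectively forever), then restore the original
 * LED_CTRL value.
 */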
9601 static int tg3_phys_id(struct net_device *dev, u32 data)
9602 {
9603         struct tg3 *tp = netdev_priv(dev);
9604         int i;
9605
9606         if (!netif_running(tp->dev))
9607                 return -EAGAIN;
9608
9609         if (data == 0)
9610                 data = UINT_MAX / 2;
9611
9612         for (i = 0; i < (data * 2); i++) {
9613                 if ((i % 2) == 0)
9614                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9615                                            LED_CTRL_1000MBPS_ON |
9616                                            LED_CTRL_100MBPS_ON |
9617                                            LED_CTRL_10MBPS_ON |
9618                                            LED_CTRL_TRAFFIC_OVERRIDE |
9619                                            LED_CTRL_TRAFFIC_BLINK |
9620                                            LED_CTRL_TRAFFIC_LED);
9621
9622                 else
9623                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9624                                            LED_CTRL_TRAFFIC_OVERRIDE);
9625
9626                 if (msleep_interruptible(500))
9627                         break;
9628         }
9629         tw32(MAC_LED_CTRL, tp->led_ctrl);
9630         return 0;
9631 }
9632
9633 static void tg3_get_ethtool_stats (struct net_device *dev,
9634                                    struct ethtool_stats *estats, u64 *tmp_stats)
9635 {
9636         struct tg3 *tp = netdev_priv(dev);
9637         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9638 }
9639
9640 #define NVRAM_TEST_SIZE 0x100
9641 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9642 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9643 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9644 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9645 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9646
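/* NVRAM self-test (reported in data[0] of "ethtool -t").  The expected
 * image size is picked from the magic word (standard EEPROM, selfboot
 * format 1 rev 0/2/3, or hardware selfboot), and the contents are then
 * verified with a byte checksum, a parity check, or CRCs over the
 * bootstrap and manufacturing blocks, depending on the format.
 */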
9647 static int tg3_test_nvram(struct tg3 *tp)
9648 {
9649         u32 csum, magic;
9650         __le32 *buf;
9651         int i, j, k, err = 0, size;
9652
9653         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9654                 return -EIO;
9655
9656         if (magic == TG3_EEPROM_MAGIC)
9657                 size = NVRAM_TEST_SIZE;
9658         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9659                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9660                     TG3_EEPROM_SB_FORMAT_1) {
9661                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9662                         case TG3_EEPROM_SB_REVISION_0:
9663                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9664                                 break;
9665                         case TG3_EEPROM_SB_REVISION_2:
9666                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9667                                 break;
9668                         case TG3_EEPROM_SB_REVISION_3:
9669                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9670                                 break;
9671                         default:
9672                                 return 0;
9673                         }
9674                 } else
9675                         return 0;
9676         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9677                 size = NVRAM_SELFBOOT_HW_SIZE;
9678         else
9679                 return -EIO;
9680
9681         buf = kmalloc(size, GFP_KERNEL);
9682         if (buf == NULL)
9683                 return -ENOMEM;
9684
9685         err = -EIO;
9686         for (i = 0, j = 0; i < size; i += 4, j++) {
9687                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9688                         break;
9689         }
9690         if (i < size)
9691                 goto out;
9692
9693         /* Selfboot format */
9694         magic = swab32(le32_to_cpu(buf[0]));
9695         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9696             TG3_EEPROM_MAGIC_FW) {
9697                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9698
9699                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9700                     TG3_EEPROM_SB_REVISION_2) {
9701                         /* For rev 2, the csum doesn't include the MBA. */
9702                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9703                                 csum8 += buf8[i];
9704                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9705                                 csum8 += buf8[i];
9706                 } else {
9707                         for (i = 0; i < size; i++)
9708                                 csum8 += buf8[i];
9709                 }
9710
9711                 if (csum8 == 0) {
9712                         err = 0;
9713                         goto out;
9714                 }
9715
9716                 err = -EIO;
9717                 goto out;
9718         }
9719
9720         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9721             TG3_EEPROM_MAGIC_HW) {
9722                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9723                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9724                 u8 *buf8 = (u8 *) buf;
9725
9726                 /* Separate the parity bits and the data bytes.  */
9727                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9728                         if ((i == 0) || (i == 8)) {
9729                                 int l;
9730                                 u8 msk;
9731
9732                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9733                                         parity[k++] = buf8[i] & msk;
9734                                 i++;
9735                         }
9736                         else if (i == 16) {
9737                                 int l;
9738                                 u8 msk;
9739
9740                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9741                                         parity[k++] = buf8[i] & msk;
9742                                 i++;
9743
9744                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9745                                         parity[k++] = buf8[i] & msk;
9746                                 i++;
9747                         }
9748                         data[j++] = buf8[i];
9749                 }
9750
9751                 err = -EIO;
9752                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9753                         u8 hw8 = hweight8(data[i]);
9754
9755                         if ((hw8 & 0x1) && parity[i])
9756                                 goto out;
9757                         else if (!(hw8 & 0x1) && !parity[i])
9758                                 goto out;
9759                 }
9760                 err = 0;
9761                 goto out;
9762         }
9763
9764         /* Bootstrap checksum at offset 0x10 */
9765         csum = calc_crc((unsigned char *) buf, 0x10);
9766         if (csum != le32_to_cpu(buf[0x10/4]))
9767                 goto out;
9768
9769         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9770         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9771         if (csum != le32_to_cpu(buf[0xfc/4]))
9772                 goto out;
9773
9774         err = 0;
9775
9776 out:
9777         kfree(buf);
9778         return err;
9779 }
9780
9781 #define TG3_SERDES_TIMEOUT_SEC  2
9782 #define TG3_COPPER_TIMEOUT_SEC  6
9783
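/* Link self-test (data[1]): wait for carrier, allowing up to 2 seconds
 * on serdes devices and 6 seconds on copper before reporting failure.
 */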
9784 static int tg3_test_link(struct tg3 *tp)
9785 {
9786         int i, max;
9787
9788         if (!netif_running(tp->dev))
9789                 return -ENODEV;
9790
9791         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9792                 max = TG3_SERDES_TIMEOUT_SEC;
9793         else
9794                 max = TG3_COPPER_TIMEOUT_SEC;
9795
9796         for (i = 0; i < max; i++) {
9797                 if (netif_carrier_ok(tp->dev))
9798                         return 0;
9799
9800                 if (msleep_interruptible(1000))
9801                         break;
9802         }
9803
9804         return -EIO;
9805 }
9806
9807 /* Only test the commonly used registers */
9808 static int tg3_test_registers(struct tg3 *tp)
9809 {
9810         int i, is_5705, is_5750;
9811         u32 offset, read_mask, write_mask, val, save_val, read_val;
9812         static struct {
9813                 u16 offset;
9814                 u16 flags;
9815 #define TG3_FL_5705     0x1
9816 #define TG3_FL_NOT_5705 0x2
9817 #define TG3_FL_NOT_5788 0x4
9818 #define TG3_FL_NOT_5750 0x8
9819                 u32 read_mask;
9820                 u32 write_mask;
9821         } reg_tbl[] = {
9822                 /* MAC Control Registers */
9823                 { MAC_MODE, TG3_FL_NOT_5705,
9824                         0x00000000, 0x00ef6f8c },
9825                 { MAC_MODE, TG3_FL_5705,
9826                         0x00000000, 0x01ef6b8c },
9827                 { MAC_STATUS, TG3_FL_NOT_5705,
9828                         0x03800107, 0x00000000 },
9829                 { MAC_STATUS, TG3_FL_5705,
9830                         0x03800100, 0x00000000 },
9831                 { MAC_ADDR_0_HIGH, 0x0000,
9832                         0x00000000, 0x0000ffff },
9833                 { MAC_ADDR_0_LOW, 0x0000,
9834                         0x00000000, 0xffffffff },
9835                 { MAC_RX_MTU_SIZE, 0x0000,
9836                         0x00000000, 0x0000ffff },
9837                 { MAC_TX_MODE, 0x0000,
9838                         0x00000000, 0x00000070 },
9839                 { MAC_TX_LENGTHS, 0x0000,
9840                         0x00000000, 0x00003fff },
9841                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9842                         0x00000000, 0x000007fc },
9843                 { MAC_RX_MODE, TG3_FL_5705,
9844                         0x00000000, 0x000007dc },
9845                 { MAC_HASH_REG_0, 0x0000,
9846                         0x00000000, 0xffffffff },
9847                 { MAC_HASH_REG_1, 0x0000,
9848                         0x00000000, 0xffffffff },
9849                 { MAC_HASH_REG_2, 0x0000,
9850                         0x00000000, 0xffffffff },
9851                 { MAC_HASH_REG_3, 0x0000,
9852                         0x00000000, 0xffffffff },
9853
9854                 /* Receive Data and Receive BD Initiator Control Registers. */
9855                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9856                         0x00000000, 0xffffffff },
9857                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9858                         0x00000000, 0xffffffff },
9859                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9860                         0x00000000, 0x00000003 },
9861                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9862                         0x00000000, 0xffffffff },
9863                 { RCVDBDI_STD_BD+0, 0x0000,
9864                         0x00000000, 0xffffffff },
9865                 { RCVDBDI_STD_BD+4, 0x0000,
9866                         0x00000000, 0xffffffff },
9867                 { RCVDBDI_STD_BD+8, 0x0000,
9868                         0x00000000, 0xffff0002 },
9869                 { RCVDBDI_STD_BD+0xc, 0x0000,
9870                         0x00000000, 0xffffffff },
9871
9872                 /* Receive BD Initiator Control Registers. */
9873                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9874                         0x00000000, 0xffffffff },
9875                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9876                         0x00000000, 0x000003ff },
9877                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9878                         0x00000000, 0xffffffff },
9879
9880                 /* Host Coalescing Control Registers. */
9881                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9882                         0x00000000, 0x00000004 },
9883                 { HOSTCC_MODE, TG3_FL_5705,
9884                         0x00000000, 0x000000f6 },
9885                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9886                         0x00000000, 0xffffffff },
9887                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9888                         0x00000000, 0x000003ff },
9889                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9890                         0x00000000, 0xffffffff },
9891                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9892                         0x00000000, 0x000003ff },
9893                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9894                         0x00000000, 0xffffffff },
9895                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9896                         0x00000000, 0x000000ff },
9897                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9898                         0x00000000, 0xffffffff },
9899                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9900                         0x00000000, 0x000000ff },
9901                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9902                         0x00000000, 0xffffffff },
9903                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9904                         0x00000000, 0xffffffff },
9905                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9906                         0x00000000, 0xffffffff },
9907                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9908                         0x00000000, 0x000000ff },
9909                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9910                         0x00000000, 0xffffffff },
9911                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9912                         0x00000000, 0x000000ff },
9913                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9914                         0x00000000, 0xffffffff },
9915                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9916                         0x00000000, 0xffffffff },
9917                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9918                         0x00000000, 0xffffffff },
9919                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9920                         0x00000000, 0xffffffff },
9921                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9922                         0x00000000, 0xffffffff },
9923                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9924                         0xffffffff, 0x00000000 },
9925                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9926                         0xffffffff, 0x00000000 },
9927
9928                 /* Buffer Manager Control Registers. */
9929                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9930                         0x00000000, 0x007fff80 },
9931                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9932                         0x00000000, 0x007fffff },
9933                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9934                         0x00000000, 0x0000003f },
9935                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9936                         0x00000000, 0x000001ff },
9937                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9938                         0x00000000, 0x000001ff },
9939                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9940                         0xffffffff, 0x00000000 },
9941                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9942                         0xffffffff, 0x00000000 },
9943
9944                 /* Mailbox Registers */
9945                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9946                         0x00000000, 0x000001ff },
9947                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9948                         0x00000000, 0x000001ff },
9949                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9950                         0x00000000, 0x000007ff },
9951                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9952                         0x00000000, 0x000001ff },
9953
9954                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9955         };
9956
9957         is_5705 = is_5750 = 0;
9958         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9959                 is_5705 = 1;
9960                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9961                         is_5750 = 1;
9962         }
9963
9964         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9965                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9966                         continue;
9967
9968                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9969                         continue;
9970
9971                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9972                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9973                         continue;
9974
9975                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9976                         continue;
9977
9978                 offset = (u32) reg_tbl[i].offset;
9979                 read_mask = reg_tbl[i].read_mask;
9980                 write_mask = reg_tbl[i].write_mask;
9981
9982                 /* Save the original register content */
9983                 save_val = tr32(offset);
9984
9985                 /* Determine the read-only value. */
9986                 read_val = save_val & read_mask;
9987
9988                 /* Write zero to the register, then make sure the read-only bits
9989                  * are not changed and the read/write bits are all zeros.
9990                  */
9991                 tw32(offset, 0);
9992
9993                 val = tr32(offset);
9994
9995                 /* Test the read-only and read/write bits. */
9996                 if (((val & read_mask) != read_val) || (val & write_mask))
9997                         goto out;
9998
9999                 /* Write ones to all the bits defined by RdMask and WrMask, then
10000                  * make sure the read-only bits are not changed and the
10001                  * read/write bits are all ones.
10002                  */
10003                 tw32(offset, read_mask | write_mask);
10004
10005                 val = tr32(offset);
10006
10007                 /* Test the read-only bits. */
10008                 if ((val & read_mask) != read_val)
10009                         goto out;
10010
10011                 /* Test the read/write bits. */
10012                 if ((val & write_mask) != write_mask)
10013                         goto out;
10014
10015                 tw32(offset, save_val);
10016         }
10017
10018         return 0;
10019
10020 out:
10021         if (netif_msg_hw(tp))
10022                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10023                        offset);
10024         tw32(offset, save_val);
10025         return -EIO;
10026 }
10027
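/* Write each test pattern to every word of the given internal memory
 * window and read it back; any mismatch fails the memory self-test.
 */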
10028 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10029 {
10030         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10031         int i;
10032         u32 j;
10033
10034         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10035                 for (j = 0; j < len; j += 4) {
10036                         u32 val;
10037
10038                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10039                         tg3_read_mem(tp, offset + j, &val);
10040                         if (val != test_pattern[i])
10041                                 return -EIO;
10042                 }
10043         }
10044         return 0;
10045 }
10046
10047 static int tg3_test_memory(struct tg3 *tp)
10048 {
10049         static struct mem_entry {
10050                 u32 offset;
10051                 u32 len;
10052         } mem_tbl_570x[] = {
10053                 { 0x00000000, 0x00b50},
10054                 { 0x00002000, 0x1c000},
10055                 { 0xffffffff, 0x00000}
10056         }, mem_tbl_5705[] = {
10057                 { 0x00000100, 0x0000c},
10058                 { 0x00000200, 0x00008},
10059                 { 0x00004000, 0x00800},
10060                 { 0x00006000, 0x01000},
10061                 { 0x00008000, 0x02000},
10062                 { 0x00010000, 0x0e000},
10063                 { 0xffffffff, 0x00000}
10064         }, mem_tbl_5755[] = {
10065                 { 0x00000200, 0x00008},
10066                 { 0x00004000, 0x00800},
10067                 { 0x00006000, 0x00800},
10068                 { 0x00008000, 0x02000},
10069                 { 0x00010000, 0x0c000},
10070                 { 0xffffffff, 0x00000}
10071         }, mem_tbl_5906[] = {
10072                 { 0x00000200, 0x00008},
10073                 { 0x00004000, 0x00400},
10074                 { 0x00006000, 0x00400},
10075                 { 0x00008000, 0x01000},
10076                 { 0x00010000, 0x01000},
10077                 { 0xffffffff, 0x00000}
10078         };
10079         struct mem_entry *mem_tbl;
10080         int err = 0;
10081         int i;
10082
10083         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10084                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10085                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10086                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10087                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10088                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10089                         mem_tbl = mem_tbl_5755;
10090                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10091                         mem_tbl = mem_tbl_5906;
10092                 else
10093                         mem_tbl = mem_tbl_5705;
10094         } else
10095                 mem_tbl = mem_tbl_570x;
10096
10097         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10098                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10099                     mem_tbl[i].len)) != 0)
10100                         break;
10101         }
10102
10103         return err;
10104 }
10105
10106 #define TG3_MAC_LOOPBACK        0
10107 #define TG3_PHY_LOOPBACK        1
10108
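/* Run one loopback iteration: put the MAC (or the PHY) into loopback,
 * transmit a single self-addressed frame filled with a known byte
 * pattern, poll the completion indices, and verify the frame comes back
 * on the standard RX ring with the same length and payload.
 */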
10109 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10110 {
10111         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10112         u32 desc_idx;
10113         struct sk_buff *skb, *rx_skb;
10114         u8 *tx_data;
10115         dma_addr_t map;
10116         int num_pkts, tx_len, rx_len, i, err;
10117         struct tg3_rx_buffer_desc *desc;
10118
10119         if (loopback_mode == TG3_MAC_LOOPBACK) {
10120                 /* HW errata - MAC loopback fails in some cases on the 5780.
10121                  * Normal traffic and PHY loopback are not affected by the
10122                  * errata.
10123                  */
10124                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10125                         return 0;
10126
10127                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10128                            MAC_MODE_PORT_INT_LPBACK;
10129                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10130                         mac_mode |= MAC_MODE_LINK_POLARITY;
10131                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10132                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10133                 else
10134                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10135                 tw32(MAC_MODE, mac_mode);
10136         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10137                 u32 val;
10138
10139                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10140                         u32 phytest;
10141
10142                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10143                                 u32 phy;
10144
10145                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10146                                              phytest | MII_TG3_EPHY_SHADOW_EN);
10147                                 if (!tg3_readphy(tp, 0x1b, &phy))
10148                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
10149                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10150                         }
10151                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10152                 } else
10153                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10154
10155                 tg3_phy_toggle_automdix(tp, 0);
10156
10157                 tg3_writephy(tp, MII_BMCR, val);
10158                 udelay(40);
10159
10160                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10162                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
10163                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10164                 } else
10165                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10166
10167                 /* reset to prevent losing 1st rx packet intermittently */
10168                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10169                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10170                         udelay(10);
10171                         tw32_f(MAC_RX_MODE, tp->rx_mode);
10172                 }
10173                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10174                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10175                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10176                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10177                                 mac_mode |= MAC_MODE_LINK_POLARITY;
10178                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
10179                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10180                 }
10181                 tw32(MAC_MODE, mac_mode);
10182         }
10183         else
10184                 return -EINVAL;
10185
10186         err = -EIO;
10187
10188         tx_len = 1514;
10189         skb = netdev_alloc_skb(tp->dev, tx_len);
10190         if (!skb)
10191                 return -ENOMEM;
10192
10193         tx_data = skb_put(skb, tx_len);
10194         memcpy(tx_data, tp->dev->dev_addr, 6);
10195         memset(tx_data + 6, 0x0, 8);
10196
10197         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10198
10199         for (i = 14; i < tx_len; i++)
10200                 tx_data[i] = (u8) (i & 0xff);
10201
10202         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10203
10204         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10205              HOSTCC_MODE_NOW);
10206
10207         udelay(10);
10208
10209         rx_start_idx = tp->hw_status->idx[0].rx_producer;
10210
10211         num_pkts = 0;
10212
10213         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10214
10215         tp->tx_prod++;
10216         num_pkts++;
10217
10218         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10219                      tp->tx_prod);
10220         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10221
10222         udelay(10);
10223
10224         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
10225         for (i = 0; i < 25; i++) {
10226                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10227                        HOSTCC_MODE_NOW);
10228
10229                 udelay(10);
10230
10231                 tx_idx = tp->hw_status->idx[0].tx_consumer;
10232                 rx_idx = tp->hw_status->idx[0].rx_producer;
10233                 if ((tx_idx == tp->tx_prod) &&
10234                     (rx_idx == (rx_start_idx + num_pkts)))
10235                         break;
10236         }
10237
10238         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10239         dev_kfree_skb(skb);
10240
10241         if (tx_idx != tp->tx_prod)
10242                 goto out;
10243
10244         if (rx_idx != rx_start_idx + num_pkts)
10245                 goto out;
10246
10247         desc = &tp->rx_rcb[rx_start_idx];
10248         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10249         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10250         if (opaque_key != RXD_OPAQUE_RING_STD)
10251                 goto out;
10252
10253         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10254             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10255                 goto out;
10256
10257         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10258         if (rx_len != tx_len)
10259                 goto out;
10260
10261         rx_skb = tp->rx_std_buffers[desc_idx].skb;
10262
10263         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10264         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10265
10266         for (i = 14; i < tx_len; i++) {
10267                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10268                         goto out;
10269         }
10270         err = 0;
10271
10272         /* tg3_free_rings will unmap and free the rx_skb */
10273 out:
10274         return err;
10275 }
10276
10277 #define TG3_MAC_LOOPBACK_FAILED         1
10278 #define TG3_PHY_LOOPBACK_FAILED         2
10279 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10280                                          TG3_PHY_LOOPBACK_FAILED)
10281
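/* Loopback self-test (data[4]): run MAC and, where applicable, PHY
 * loopback.  GPHY auto power-down and, on 5784/5761/5785, the CPMU
 * link-aware power management are temporarily disabled (under the CPMU
 * mutex) so they cannot interfere, and are restored afterwards.
 */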
10282 static int tg3_test_loopback(struct tg3 *tp)
10283 {
10284         int err = 0;
10285         u32 cpmuctrl = 0;
10286
10287         if (!netif_running(tp->dev))
10288                 return TG3_LOOPBACK_FAILED;
10289
10290         err = tg3_reset_hw(tp, 1);
10291         if (err)
10292                 return TG3_LOOPBACK_FAILED;
10293
10294         /* Turn off gphy autopowerdown. */
10295         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10296                 tg3_phy_toggle_apd(tp, false);
10297
10298         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10299             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10300             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10301                 int i;
10302                 u32 status;
10303
10304                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10305
10306                 /* Wait for up to 40 microseconds to acquire lock. */
10307                 for (i = 0; i < 4; i++) {
10308                         status = tr32(TG3_CPMU_MUTEX_GNT);
10309                         if (status == CPMU_MUTEX_GNT_DRIVER)
10310                                 break;
10311                         udelay(10);
10312                 }
10313
10314                 if (status != CPMU_MUTEX_GNT_DRIVER)
10315                         return TG3_LOOPBACK_FAILED;
10316
10317                 /* Turn off link-based power management. */
10318                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10319                 tw32(TG3_CPMU_CTRL,
10320                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10321                                   CPMU_CTRL_LINK_AWARE_MODE));
10322         }
10323
10324         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10325                 err |= TG3_MAC_LOOPBACK_FAILED;
10326
10327         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10328             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10329             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10330                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10331
10332                 /* Release the mutex */
10333                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10334         }
10335
10336         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10337             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10338                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10339                         err |= TG3_PHY_LOOPBACK_FAILED;
10340         }
10341
10342         /* Re-enable gphy autopowerdown. */
10343         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10344                 tg3_phy_toggle_apd(tp, true);
10345
10346         return err;
10347 }
10348
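/* Top-level "ethtool -t" dispatcher.  The NVRAM and link tests always
 * run; offline tests additionally halt the chip for the register,
 * memory, loopback and interrupt tests and then restart the hardware.
 * A device in low-power state is brought to D0 first and returned to
 * D3hot at the end.
 */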
10349 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10350                           u64 *data)
10351 {
10352         struct tg3 *tp = netdev_priv(dev);
10353
10354         if (tp->link_config.phy_is_low_power)
10355                 tg3_set_power_state(tp, PCI_D0);
10356
10357         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10358
10359         if (tg3_test_nvram(tp) != 0) {
10360                 etest->flags |= ETH_TEST_FL_FAILED;
10361                 data[0] = 1;
10362         }
10363         if (tg3_test_link(tp) != 0) {
10364                 etest->flags |= ETH_TEST_FL_FAILED;
10365                 data[1] = 1;
10366         }
10367         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10368                 int err, err2 = 0, irq_sync = 0;
10369
10370                 if (netif_running(dev)) {
10371                         tg3_phy_stop(tp);
10372                         tg3_netif_stop(tp);
10373                         irq_sync = 1;
10374                 }
10375
10376                 tg3_full_lock(tp, irq_sync);
10377
10378                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10379                 err = tg3_nvram_lock(tp);
10380                 tg3_halt_cpu(tp, RX_CPU_BASE);
10381                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10382                         tg3_halt_cpu(tp, TX_CPU_BASE);
10383                 if (!err)
10384                         tg3_nvram_unlock(tp);
10385
10386                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10387                         tg3_phy_reset(tp);
10388
10389                 if (tg3_test_registers(tp) != 0) {
10390                         etest->flags |= ETH_TEST_FL_FAILED;
10391                         data[2] = 1;
10392                 }
10393                 if (tg3_test_memory(tp) != 0) {
10394                         etest->flags |= ETH_TEST_FL_FAILED;
10395                         data[3] = 1;
10396                 }
10397                 if ((data[4] = tg3_test_loopback(tp)) != 0)
10398                         etest->flags |= ETH_TEST_FL_FAILED;
10399
10400                 tg3_full_unlock(tp);
10401
10402                 if (tg3_test_interrupt(tp) != 0) {
10403                         etest->flags |= ETH_TEST_FL_FAILED;
10404                         data[5] = 1;
10405                 }
10406
10407                 tg3_full_lock(tp, 0);
10408
10409                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10410                 if (netif_running(dev)) {
10411                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10412                         err2 = tg3_restart_hw(tp, 1);
10413                         if (!err2)
10414                                 tg3_netif_start(tp);
10415                 }
10416
10417                 tg3_full_unlock(tp);
10418
10419                 if (irq_sync && !err2)
10420                         tg3_phy_start(tp);
10421         }
10422         if (tp->link_config.phy_is_low_power)
10423                 tg3_set_power_state(tp, PCI_D3hot);
10424
10425 }
10426
10427 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10428 {
10429         struct mii_ioctl_data *data = if_mii(ifr);
10430         struct tg3 *tp = netdev_priv(dev);
10431         int err;
10432
10433         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10434                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10435                         return -EAGAIN;
10436                 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10437         }
10438
10439         switch (cmd) {
10440         case SIOCGMIIPHY:
10441                 data->phy_id = PHY_ADDR;
10442
10443                 /* fallthru */
10444         case SIOCGMIIREG: {
10445                 u32 mii_regval;
10446
10447                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10448                         break;                  /* We have no PHY */
10449
10450                 if (tp->link_config.phy_is_low_power)
10451                         return -EAGAIN;
10452
10453                 spin_lock_bh(&tp->lock);
10454                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10455                 spin_unlock_bh(&tp->lock);
10456
10457                 data->val_out = mii_regval;
10458
10459                 return err;
10460         }
10461
10462         case SIOCSMIIREG:
10463                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10464                         break;                  /* We have no PHY */
10465
10466                 if (!capable(CAP_NET_ADMIN))
10467                         return -EPERM;
10468
10469                 if (tp->link_config.phy_is_low_power)
10470                         return -EAGAIN;
10471
10472                 spin_lock_bh(&tp->lock);
10473                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10474                 spin_unlock_bh(&tp->lock);
10475
10476                 return err;
10477
10478         default:
10479                 /* do nothing */
10480                 break;
10481         }
10482         return -EOPNOTSUPP;
10483 }
10484
10485 #if TG3_VLAN_TAG_USED
10486 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10487 {
10488         struct tg3 *tp = netdev_priv(dev);
10489
10490         if (netif_running(dev))
10491                 tg3_netif_stop(tp);
10492
10493         tg3_full_lock(tp, 0);
10494
10495         tp->vlgrp = grp;
10496
10497         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10498         __tg3_set_rx_mode(dev);
10499
10500         if (netif_running(dev))
10501                 tg3_netif_start(tp);
10502
10503         tg3_full_unlock(tp);
10504 }
10505 #endif
10506
10507 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10508 {
10509         struct tg3 *tp = netdev_priv(dev);
10510
10511         memcpy(ec, &tp->coal, sizeof(*ec));
10512         return 0;
10513 }
10514
10515 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10516 {
10517         struct tg3 *tp = netdev_priv(dev);
10518         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10519         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10520
10521         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10522                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10523                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10524                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10525                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10526         }
10527
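              /* When TG3_FLG2_5705_PLUS is set, the *_irq and statistics-block
               * limits above are left at zero, so any nonzero request for
               * those parameters is rejected by the range check below.
               */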
10528         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10529             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10530             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10531             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10532             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10533             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10534             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10535             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10536             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10537             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10538                 return -EINVAL;
10539
10540         /* No rx interrupts will be generated if both are zero */
10541         if ((ec->rx_coalesce_usecs == 0) &&
10542             (ec->rx_max_coalesced_frames == 0))
10543                 return -EINVAL;
10544
10545         /* No tx interrupts will be generated if both are zero */
10546         if ((ec->tx_coalesce_usecs == 0) &&
10547             (ec->tx_max_coalesced_frames == 0))
10548                 return -EINVAL;
10549
10550         /* Only copy relevant parameters, ignore all others. */
10551         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10552         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10553         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10554         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10555         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10556         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10557         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10558         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10559         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10560
10561         if (netif_running(dev)) {
10562                 tg3_full_lock(tp, 0);
10563                 __tg3_set_coalesce(tp, &tp->coal);
10564                 tg3_full_unlock(tp);
10565         }
10566         return 0;
10567 }
10568
10569 static const struct ethtool_ops tg3_ethtool_ops = {
10570         .get_settings           = tg3_get_settings,
10571         .set_settings           = tg3_set_settings,
10572         .get_drvinfo            = tg3_get_drvinfo,
10573         .get_regs_len           = tg3_get_regs_len,
10574         .get_regs               = tg3_get_regs,
10575         .get_wol                = tg3_get_wol,
10576         .set_wol                = tg3_set_wol,
10577         .get_msglevel           = tg3_get_msglevel,
10578         .set_msglevel           = tg3_set_msglevel,
10579         .nway_reset             = tg3_nway_reset,
10580         .get_link               = ethtool_op_get_link,
10581         .get_eeprom_len         = tg3_get_eeprom_len,
10582         .get_eeprom             = tg3_get_eeprom,
10583         .set_eeprom             = tg3_set_eeprom,
10584         .get_ringparam          = tg3_get_ringparam,
10585         .set_ringparam          = tg3_set_ringparam,
10586         .get_pauseparam         = tg3_get_pauseparam,
10587         .set_pauseparam         = tg3_set_pauseparam,
10588         .get_rx_csum            = tg3_get_rx_csum,
10589         .set_rx_csum            = tg3_set_rx_csum,
10590         .set_tx_csum            = tg3_set_tx_csum,
10591         .set_sg                 = ethtool_op_set_sg,
10592         .set_tso                = tg3_set_tso,
10593         .self_test              = tg3_self_test,
10594         .get_strings            = tg3_get_strings,
10595         .phys_id                = tg3_phys_id,
10596         .get_ethtool_stats      = tg3_get_ethtool_stats,
10597         .get_coalesce           = tg3_get_coalesce,
10598         .set_coalesce           = tg3_set_coalesce,
10599         .get_sset_count         = tg3_get_sset_count,
10600 };
10601
10602 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10603 {
10604         u32 cursize, val, magic;
10605
10606         tp->nvram_size = EEPROM_CHIP_SIZE;
10607
10608         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10609                 return;
10610
10611         if ((magic != TG3_EEPROM_MAGIC) &&
10612             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10613             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10614                 return;
10615
10616         /*
10617          * Size the chip by reading offsets at increasing powers of two.
10618          * When we encounter our validation signature, we know the addressing
10619          * has wrapped around, and thus have our chip size.
10620          */
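              /* For example, on a hypothetical 16 KB part the reads at 0x10,
               * 0x20, ..., 0x2000 return ordinary data; the read at 0x4000
               * wraps to offset 0 and returns the magic word, leaving cursize
               * equal to the device size.
               */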
10621         cursize = 0x10;
10622
10623         while (cursize < tp->nvram_size) {
10624                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10625                         return;
10626
10627                 if (val == magic)
10628                         break;
10629
10630                 cursize <<= 1;
10631         }
10632
10633         tp->nvram_size = cursize;
10634 }
10635
10636 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10637 {
10638         u32 val;
10639
10640         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10641                 return;
10642
10643         /* Selfboot format */
10644         if (val != TG3_EEPROM_MAGIC) {
10645                 tg3_get_eeprom_size(tp);
10646                 return;
10647         }
10648
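              /* For the standard image format the word at offset 0xf0 encodes
               * the NVRAM size in KB in its upper 16 bits; fall back to a
               * 512 KB default if it reads as zero or cannot be read.
               */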
10649         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10650                 if (val != 0) {
10651                         tp->nvram_size = (val >> 16) * 1024;
10652                         return;
10653                 }
10654         }
10655         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10656 }
10657
10658 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10659 {
10660         u32 nvcfg1;
10661
10662         nvcfg1 = tr32(NVRAM_CFG1);
10663         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10664                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10665         } else {
10667                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10668                 tw32(NVRAM_CFG1, nvcfg1);
10669         }
10670
10671         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10672             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10673                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10674                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10675                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10676                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10677                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10678                                 break;
10679                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10680                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10681                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10682                                 break;
10683                         case FLASH_VENDOR_ATMEL_EEPROM:
10684                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10685                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10686                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10687                                 break;
10688                         case FLASH_VENDOR_ST:
10689                                 tp->nvram_jedecnum = JEDEC_ST;
10690                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10691                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10692                                 break;
10693                         case FLASH_VENDOR_SAIFUN:
10694                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10695                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10696                                 break;
10697                         case FLASH_VENDOR_SST_SMALL:
10698                         case FLASH_VENDOR_SST_LARGE:
10699                                 tp->nvram_jedecnum = JEDEC_SST;
10700                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10701                                 break;
10702                 }
10703         } else {
10705                 tp->nvram_jedecnum = JEDEC_ATMEL;
10706                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10707                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10708         }
10709 }
10710
10711 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10712 {
10713         u32 nvcfg1;
10714
10715         nvcfg1 = tr32(NVRAM_CFG1);
10716
10717         /* NVRAM protection for TPM */
10718         if (nvcfg1 & (1 << 27))
10719                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10720
10721         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10722                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10723                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10724                         tp->nvram_jedecnum = JEDEC_ATMEL;
10725                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10726                         break;
10727                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10728                         tp->nvram_jedecnum = JEDEC_ATMEL;
10729                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10730                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10731                         break;
10732                 case FLASH_5752VENDOR_ST_M45PE10:
10733                 case FLASH_5752VENDOR_ST_M45PE20:
10734                 case FLASH_5752VENDOR_ST_M45PE40:
10735                         tp->nvram_jedecnum = JEDEC_ST;
10736                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10737                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10738                         break;
10739         }
10740
10741         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10742                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10743                         case FLASH_5752PAGE_SIZE_256:
10744                                 tp->nvram_pagesize = 256;
10745                                 break;
10746                         case FLASH_5752PAGE_SIZE_512:
10747                                 tp->nvram_pagesize = 512;
10748                                 break;
10749                         case FLASH_5752PAGE_SIZE_1K:
10750                                 tp->nvram_pagesize = 1024;
10751                                 break;
10752                         case FLASH_5752PAGE_SIZE_2K:
10753                                 tp->nvram_pagesize = 2048;
10754                                 break;
10755                         case FLASH_5752PAGE_SIZE_4K:
10756                                 tp->nvram_pagesize = 4096;
10757                                 break;
10758                         case FLASH_5752PAGE_SIZE_264:
10759                                 tp->nvram_pagesize = 264;
10760                                 break;
10761                 }
10762         } else {
10764                 /* For eeprom, set pagesize to maximum eeprom size */
10765                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10766
10767                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10768                 tw32(NVRAM_CFG1, nvcfg1);
10769         }
10770 }
10771
10772 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10773 {
10774         u32 nvcfg1, protect = 0;
10775
10776         nvcfg1 = tr32(NVRAM_CFG1);
10777
10778         /* NVRAM protection for TPM */
10779         if (nvcfg1 & (1 << 27)) {
10780                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10781                 protect = 1;
10782         }
10783
10784         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10785         switch (nvcfg1) {
10786                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10787                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10788                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10789                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10790                         tp->nvram_jedecnum = JEDEC_ATMEL;
10791                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10792                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10793                         tp->nvram_pagesize = 264;
10794                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10795                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10796                                 tp->nvram_size = (protect ? 0x3e200 :
10797                                                   TG3_NVRAM_SIZE_512KB);
10798                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10799                                 tp->nvram_size = (protect ? 0x1f200 :
10800                                                   TG3_NVRAM_SIZE_256KB);
10801                         else
10802                                 tp->nvram_size = (protect ? 0x1f200 :
10803                                                   TG3_NVRAM_SIZE_128KB);
10804                         break;
10805                 case FLASH_5752VENDOR_ST_M45PE10:
10806                 case FLASH_5752VENDOR_ST_M45PE20:
10807                 case FLASH_5752VENDOR_ST_M45PE40:
10808                         tp->nvram_jedecnum = JEDEC_ST;
10809                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10810                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10811                         tp->nvram_pagesize = 256;
10812                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10813                                 tp->nvram_size = (protect ?
10814                                                   TG3_NVRAM_SIZE_64KB :
10815                                                   TG3_NVRAM_SIZE_128KB);
10816                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10817                                 tp->nvram_size = (protect ?
10818                                                   TG3_NVRAM_SIZE_64KB :
10819                                                   TG3_NVRAM_SIZE_256KB);
10820                         else
10821                                 tp->nvram_size = (protect ?
10822                                                   TG3_NVRAM_SIZE_128KB :
10823                                                   TG3_NVRAM_SIZE_512KB);
10824                         break;
10825         }
10826 }
10827
10828 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10829 {
10830         u32 nvcfg1;
10831
10832         nvcfg1 = tr32(NVRAM_CFG1);
10833
10834         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10835                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10836                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10837                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10838                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10839                         tp->nvram_jedecnum = JEDEC_ATMEL;
10840                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10841                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10842
10843                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10844                         tw32(NVRAM_CFG1, nvcfg1);
10845                         break;
10846                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10847                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10848                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10849                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10850                         tp->nvram_jedecnum = JEDEC_ATMEL;
10851                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10852                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10853                         tp->nvram_pagesize = 264;
10854                         break;
10855                 case FLASH_5752VENDOR_ST_M45PE10:
10856                 case FLASH_5752VENDOR_ST_M45PE20:
10857                 case FLASH_5752VENDOR_ST_M45PE40:
10858                         tp->nvram_jedecnum = JEDEC_ST;
10859                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10860                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10861                         tp->nvram_pagesize = 256;
10862                         break;
10863         }
10864 }
10865
10866 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10867 {
10868         u32 nvcfg1, protect = 0;
10869
10870         nvcfg1 = tr32(NVRAM_CFG1);
10871
10872         /* NVRAM protection for TPM */
10873         if (nvcfg1 & (1 << 27)) {
10874                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10875                 protect = 1;
10876         }
10877
10878         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10879         switch (nvcfg1) {
10880                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10881                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10882                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10883                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10884                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10885                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10886                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10887                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10888                         tp->nvram_jedecnum = JEDEC_ATMEL;
10889                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10890                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10891                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10892                         tp->nvram_pagesize = 256;
10893                         break;
10894                 case FLASH_5761VENDOR_ST_A_M45PE20:
10895                 case FLASH_5761VENDOR_ST_A_M45PE40:
10896                 case FLASH_5761VENDOR_ST_A_M45PE80:
10897                 case FLASH_5761VENDOR_ST_A_M45PE16:
10898                 case FLASH_5761VENDOR_ST_M_M45PE20:
10899                 case FLASH_5761VENDOR_ST_M_M45PE40:
10900                 case FLASH_5761VENDOR_ST_M_M45PE80:
10901                 case FLASH_5761VENDOR_ST_M_M45PE16:
10902                         tp->nvram_jedecnum = JEDEC_ST;
10903                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10904                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10905                         tp->nvram_pagesize = 256;
10906                         break;
10907         }
10908
10909         if (protect) {
10910                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10911         } else {
10912                 switch (nvcfg1) {
10913                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10914                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10915                         case FLASH_5761VENDOR_ST_A_M45PE16:
10916                         case FLASH_5761VENDOR_ST_M_M45PE16:
10917                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10918                                 break;
10919                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10920                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10921                         case FLASH_5761VENDOR_ST_A_M45PE80:
10922                         case FLASH_5761VENDOR_ST_M_M45PE80:
10923                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10924                                 break;
10925                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10926                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10927                         case FLASH_5761VENDOR_ST_A_M45PE40:
10928                         case FLASH_5761VENDOR_ST_M_M45PE40:
10929                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10930                                 break;
10931                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10932                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10933                         case FLASH_5761VENDOR_ST_A_M45PE20:
10934                         case FLASH_5761VENDOR_ST_M_M45PE20:
10935                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10936                                 break;
10937                 }
10938         }
10939 }
10940
10941 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10942 {
10943         tp->nvram_jedecnum = JEDEC_ATMEL;
10944         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10945         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10946 }
10947
10948 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10949 static void __devinit tg3_nvram_init(struct tg3 *tp)
10950 {
10951         tw32_f(GRC_EEPROM_ADDR,
10952              (EEPROM_ADDR_FSM_RESET |
10953               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10954                EEPROM_ADDR_CLKPERD_SHIFT)));
10955
10956         msleep(1);
10957
10958         /* Enable seeprom accesses. */
10959         tw32_f(GRC_LOCAL_CTRL,
10960              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10961         udelay(100);
10962
10963         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10964             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10965                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10966
10967                 if (tg3_nvram_lock(tp)) {
10968                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10969                                "tg3_nvram_init failed.\n", tp->dev->name);
10970                         return;
10971                 }
10972                 tg3_enable_nvram_access(tp);
10973
10974                 tp->nvram_size = 0;
10975
10976                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10977                         tg3_get_5752_nvram_info(tp);
10978                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10979                         tg3_get_5755_nvram_info(tp);
10980                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10981                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10982                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10983                         tg3_get_5787_nvram_info(tp);
10984                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10985                         tg3_get_5761_nvram_info(tp);
10986                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10987                         tg3_get_5906_nvram_info(tp);
10988                 else
10989                         tg3_get_nvram_info(tp);
10990
10991                 if (tp->nvram_size == 0)
10992                         tg3_get_nvram_size(tp);
10993
10994                 tg3_disable_nvram_access(tp);
10995                 tg3_nvram_unlock(tp);
10996
10997         } else {
10998                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10999
11000                 tg3_get_eeprom_size(tp);
11001         }
11002 }
11003
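/* Legacy read path for parts without the NVRAM interface: program the GRC
 * EEPROM state machine with the target address, then poll for
 * EEPROM_ADDR_COMPLETE (up to 1000 iterations of msleep(1)) before latching
 * the word from GRC_EEPROM_DATA.
 */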
11004 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
11005                                         u32 offset, u32 *val)
11006 {
11007         u32 tmp;
11008         int i;
11009
11010         if (offset > EEPROM_ADDR_ADDR_MASK ||
11011             (offset % 4) != 0)
11012                 return -EINVAL;
11013
11014         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
11015                                         EEPROM_ADDR_DEVID_MASK |
11016                                         EEPROM_ADDR_READ);
11017         tw32(GRC_EEPROM_ADDR,
11018              tmp |
11019              (0 << EEPROM_ADDR_DEVID_SHIFT) |
11020              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
11021               EEPROM_ADDR_ADDR_MASK) |
11022              EEPROM_ADDR_READ | EEPROM_ADDR_START);
11023
11024         for (i = 0; i < 1000; i++) {
11025                 tmp = tr32(GRC_EEPROM_ADDR);
11026
11027                 if (tmp & EEPROM_ADDR_COMPLETE)
11028                         break;
11029                 msleep(1);
11030         }
11031         if (!(tmp & EEPROM_ADDR_COMPLETE))
11032                 return -EBUSY;
11033
11034         *val = tr32(GRC_EEPROM_DATA);
11035         return 0;
11036 }
11037
11038 #define NVRAM_CMD_TIMEOUT 10000
11039
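/* Kick off an NVRAM command and poll for NVRAM_CMD_DONE.  With
 * NVRAM_CMD_TIMEOUT polls of udelay(10) each, this waits roughly 100 ms
 * before giving up with -EBUSY.
 */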
11040 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
11041 {
11042         int i;
11043
11044         tw32(NVRAM_CMD, nvram_cmd);
11045         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
11046                 udelay(10);
11047                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
11048                         udelay(10);
11049                         break;
11050                 }
11051         }
11052         if (i == NVRAM_CMD_TIMEOUT) {
11053                 return -EBUSY;
11054         }
11055         return 0;
11056 }
11057
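/* Translate a linear NVRAM offset into the page/byte form used by Atmel
 * AT45DB0x1B-style buffered flash, whose pages are not a power of two
 * (264 bytes in this driver).  As a hypothetical example with a 264-byte
 * page, offset 1000 maps to page 3 (1000 / 264) at byte 208 (1000 % 264),
 * with the page number shifted up by ATMEL_AT45DB0X1B_PAGE_POS.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */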
11058 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
11059 {
11060         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11061             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11062             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
11063            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
11064             (tp->nvram_jedecnum == JEDEC_ATMEL))
11065
11066                 addr = ((addr / tp->nvram_pagesize) <<
11067                         ATMEL_AT45DB0X1B_PAGE_POS) +
11068                        (addr % tp->nvram_pagesize);
11069
11070         return addr;
11071 }
11072
11073 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
11074 {
11075         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11076             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11077             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
11078            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
11079             (tp->nvram_jedecnum == JEDEC_ATMEL))
11080
11081                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11082                         tp->nvram_pagesize) +
11083                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11084
11085         return addr;
11086 }
11087
11088 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11089 {
11090         int ret;
11091
11092         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11093                 return tg3_nvram_read_using_eeprom(tp, offset, val);
11094
11095         offset = tg3_nvram_phys_addr(tp, offset);
11096
11097         if (offset > NVRAM_ADDR_MSK)
11098                 return -EINVAL;
11099
11100         ret = tg3_nvram_lock(tp);
11101         if (ret)
11102                 return ret;
11103
11104         tg3_enable_nvram_access(tp);
11105
11106         tw32(NVRAM_ADDR, offset);
11107         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11108                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11109
11110         if (ret == 0)
11111                 *val = swab32(tr32(NVRAM_RDDATA));
11112
11113         tg3_disable_nvram_access(tp);
11114
11115         tg3_nvram_unlock(tp);
11116
11117         return ret;
11118 }
11119
11120 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11121 {
11122         u32 v;
11123         int res = tg3_nvram_read(tp, offset, &v);
11124         if (!res)
11125                 *val = cpu_to_le32(v);
11126         return res;
11127 }
11128
11129 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11130 {
11131         int err;
11132         u32 tmp;
11133
11134         err = tg3_nvram_read(tp, offset, &tmp);
11135         *val = swab32(tmp);
11136         return err;
11137 }
11138
11139 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11140                                     u32 offset, u32 len, u8 *buf)
11141 {
11142         int i, j, rc = 0;
11143         u32 val;
11144
11145         for (i = 0; i < len; i += 4) {
11146                 u32 addr;
11147                 __le32 data;
11148
11149                 addr = offset + i;
11150
11151                 memcpy(&data, buf + i, 4);
11152
11153                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
11154
11155                 val = tr32(GRC_EEPROM_ADDR);
11156                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11157
11158                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11159                         EEPROM_ADDR_READ);
11160                 tw32(GRC_EEPROM_ADDR, val |
11161                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
11162                         (addr & EEPROM_ADDR_ADDR_MASK) |
11163                         EEPROM_ADDR_START |
11164                         EEPROM_ADDR_WRITE);
11165
11166                 for (j = 0; j < 1000; j++) {
11167                         val = tr32(GRC_EEPROM_ADDR);
11168
11169                         if (val & EEPROM_ADDR_COMPLETE)
11170                                 break;
11171                         msleep(1);
11172                 }
11173                 if (!(val & EEPROM_ADDR_COMPLETE)) {
11174                         rc = -EBUSY;
11175                         break;
11176                 }
11177         }
11178
11179         return rc;
11180 }
11181
11182 /* offset and length are dword aligned */
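/* Unbuffered flash is programmed a full page at a time: read the existing
 * page into a scratch buffer, merge in the caller's data, issue a write
 * enable, erase the page, issue another write enable, and then program the
 * page back one dword at a time with NVRAM_CMD_FIRST/NVRAM_CMD_LAST
 * bracketing the burst.
 */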
11183 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11184                 u8 *buf)
11185 {
11186         int ret = 0;
11187         u32 pagesize = tp->nvram_pagesize;
11188         u32 pagemask = pagesize - 1;
11189         u32 nvram_cmd;
11190         u8 *tmp;
11191
11192         tmp = kmalloc(pagesize, GFP_KERNEL);
11193         if (tmp == NULL)
11194                 return -ENOMEM;
11195
11196         while (len) {
11197                 int j;
11198                 u32 phy_addr, page_off, size;
11199
11200                 phy_addr = offset & ~pagemask;
11201
11202                 for (j = 0; j < pagesize; j += 4) {
11203                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11204                                                 (__le32 *) (tmp + j))))
11205                                 break;
11206                 }
11207                 if (ret)
11208                         break;
11209
11210                 page_off = offset & pagemask;
11211                 size = pagesize;
11212                 if (len < size)
11213                         size = len;
11214
11215                 len -= size;
11216
11217                 memcpy(tmp + page_off, buf, size);
11218
11219                 offset = offset + (pagesize - page_off);
11220
11221                 tg3_enable_nvram_access(tp);
11222
11223                 /*
11224                  * Before we can erase the flash page, we need
11225                  * to issue a special "write enable" command.
11226                  */
11227                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11228
11229                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11230                         break;
11231
11232                 /* Erase the target page */
11233                 tw32(NVRAM_ADDR, phy_addr);
11234
11235                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11236                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11237
11238                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11239                         break;
11240
11241                 /* Issue another write enable to start the write. */
11242                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11243
11244                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11245                         break;
11246
11247                 for (j = 0; j < pagesize; j += 4) {
11248                         __be32 data;
11249
11250                         data = *((__be32 *) (tmp + j));
11251                         /* swab32(le32_to_cpu(data)), actually */
11252                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11253
11254                         tw32(NVRAM_ADDR, phy_addr + j);
11255
11256                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11257                                 NVRAM_CMD_WR;
11258
11259                         if (j == 0)
11260                                 nvram_cmd |= NVRAM_CMD_FIRST;
11261                         else if (j == (pagesize - 4))
11262                                 nvram_cmd |= NVRAM_CMD_LAST;
11263
11264                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11265                                 break;
11266                 }
11267                 if (ret)
11268                         break;
11269         }
11270
11271         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11272         tg3_nvram_exec_cmd(tp, nvram_cmd);
11273
11274         kfree(tmp);
11275
11276         return ret;
11277 }
11278
11279 /* offset and length are dword aligned */
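/* Buffered flash and plain EEPROM parts are programmed one dword at a time.
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracket each flash page (or the whole
 * transfer for EEPROM), and for ST parts on chips other than the ASIC
 * revisions listed below a separate write-enable command is issued at the
 * start of each page.
 */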
11280 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11281                 u8 *buf)
11282 {
11283         int i, ret = 0;
11284
11285         for (i = 0; i < len; i += 4, offset += 4) {
11286                 u32 page_off, phy_addr, nvram_cmd;
11287                 __be32 data;
11288
11289                 memcpy(&data, buf + i, 4);
11290                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11291
11292                 page_off = offset % tp->nvram_pagesize;
11293
11294                 phy_addr = tg3_nvram_phys_addr(tp, offset);
11295
11296                 tw32(NVRAM_ADDR, phy_addr);
11297
11298                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11299
11300                 if ((page_off == 0) || (i == 0))
11301                         nvram_cmd |= NVRAM_CMD_FIRST;
11302                 if (page_off == (tp->nvram_pagesize - 4))
11303                         nvram_cmd |= NVRAM_CMD_LAST;
11304
11305                 if (i == (len - 4))
11306                         nvram_cmd |= NVRAM_CMD_LAST;
11307
11308                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11309                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11310                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11311                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11312                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11313                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11314                     (tp->nvram_jedecnum == JEDEC_ST) &&
11315                     (nvram_cmd & NVRAM_CMD_FIRST)) {
11316
11317                         if ((ret = tg3_nvram_exec_cmd(tp,
11318                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11319                                 NVRAM_CMD_DONE)))
11320
11321                                 break;
11322                 }
11323                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11324                         /* We always do complete word writes to eeprom. */
11325                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11326                 }
11327
11328                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11329                         break;
11330         }
11331         return ret;
11332 }
11333
11334 /* offset and length are dword aligned */
11335 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11336 {
11337         int ret;
11338
11339         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11340                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11341                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11342                 udelay(40);
11343         }
11344
11345         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11346                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11347         } else {
11349                 u32 grc_mode;
11350
11351                 ret = tg3_nvram_lock(tp);
11352                 if (ret)
11353                         return ret;
11354
11355                 tg3_enable_nvram_access(tp);
11356                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11357                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11358                         tw32(NVRAM_WRITE1, 0x406);
11359
11360                 grc_mode = tr32(GRC_MODE);
11361                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11362
11363                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11364                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11365
11366                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11367                                 buf);
11368                 } else {
11370                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11371                                 buf);
11372                 }
11373
11374                 grc_mode = tr32(GRC_MODE);
11375                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11376
11377                 tg3_disable_nvram_access(tp);
11378                 tg3_nvram_unlock(tp);
11379         }
11380
11381         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11382                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11383                 udelay(40);
11384         }
11385
11386         return ret;
11387 }
11388
11389 struct subsys_tbl_ent {
11390         u16 subsys_vendor, subsys_devid;
11391         u32 phy_id;
11392 };
11393
11394 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11395         /* Broadcom boards. */
11396         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11397         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11398         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11399         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
11400         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11401         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11402         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
11403         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11404         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11405         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11406         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11407
11408         /* 3com boards. */
11409         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11410         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11411         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
11412         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11413         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11414
11415         /* DELL boards. */
11416         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11417         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11418         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11419         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11420
11421         /* Compaq boards. */
11422         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11423         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11424         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
11425         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11426         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11427
11428         /* IBM boards. */
11429         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11430 };
11431
11432 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11433 {
11434         int i;
11435
11436         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11437                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11438                      tp->pdev->subsystem_vendor) &&
11439                     (subsys_id_to_phy_id[i].subsys_devid ==
11440                      tp->pdev->subsystem_device))
11441                         return &subsys_id_to_phy_id[i];
11442         }
11443         return NULL;
11444 }
11445
11446 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11447 {
11448         u32 val;
11449         u16 pmcsr;
11450
11451         /* On some early chips the SRAM cannot be accessed in D3hot state,
11452          * so we need to make sure we're in D0.
11453          */
11454         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11455         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11456         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11457         msleep(1);
11458
11459         /* Make sure register accesses (indirect or otherwise)
11460          * will function correctly.
11461          */
11462         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11463                                tp->misc_host_ctrl);
11464
11465         /* The memory arbiter has to be enabled in order for SRAM accesses
11466          * to succeed.  Normally on powerup the tg3 chip firmware will make
11467          * sure it is enabled, but other entities such as system netboot
11468          * code might disable it.
11469          */
11470         val = tr32(MEMARB_MODE);
11471         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11472
11473         tp->phy_id = PHY_ID_INVALID;
11474         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11475
11476         /* Assume an onboard device and WOL capable by default.  */
11477         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11478
11479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11480                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11481                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11482                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11483                 }
11484                 val = tr32(VCPU_CFGSHDW);
11485                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11486                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11487                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11488                     (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11489                     device_may_wakeup(&tp->pdev->dev))
11490                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11491                 goto done;
11492         }
11493
11494         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11495         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11496                 u32 nic_cfg, led_cfg;
11497                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11498                 int eeprom_phy_serdes = 0;
11499
11500                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11501                 tp->nic_sram_data_cfg = nic_cfg;
11502
11503                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11504                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11505                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11506                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11507                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11508                     (ver > 0) && (ver < 0x100))
11509                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11510
11511                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11512                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11513
11514                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11515                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11516                         eeprom_phy_serdes = 1;
11517
11518                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11519                 if (nic_phy_id != 0) {
11520                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11521                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11522
11523                         eeprom_phy_id  = (id1 >> 16) << 10;
11524                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
11525                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
11526                 } else
11527                         eeprom_phy_id = 0;
11528
11529                 tp->phy_id = eeprom_phy_id;
11530                 if (eeprom_phy_serdes) {
11531                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11532                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11533                         else
11534                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11535                 }
11536
11537                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11538                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11539                                     SHASTA_EXT_LED_MODE_MASK);
11540                 else
11541                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11542
11543                 switch (led_cfg) {
11544                 default:
11545                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11546                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11547                         break;
11548
11549                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11550                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11551                         break;
11552
11553                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11554                         tp->led_ctrl = LED_CTRL_MODE_MAC;
11555
11556                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11557                          * read on some older 5700/5701 bootcode.
11558                          * seen with some older 5700/5701 bootcode.
11559                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11560                             ASIC_REV_5700 ||
11561                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
11562                             ASIC_REV_5701)
11563                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11564
11565                         break;
11566
11567                 case SHASTA_EXT_LED_SHARED:
11568                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
11569                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11570                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11571                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11572                                                  LED_CTRL_MODE_PHY_2);
11573                         break;
11574
11575                 case SHASTA_EXT_LED_MAC:
11576                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11577                         break;
11578
11579                 case SHASTA_EXT_LED_COMBO:
11580                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
11581                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11582                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11583                                                  LED_CTRL_MODE_PHY_2);
11584                         break;
11585
11586                 }
11587
11588                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11589                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11590                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11591                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11592
11593                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11594                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11595
11596                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11597                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11598                         if ((tp->pdev->subsystem_vendor ==
11599                              PCI_VENDOR_ID_ARIMA) &&
11600                             (tp->pdev->subsystem_device == 0x205a ||
11601                              tp->pdev->subsystem_device == 0x2063))
11602                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11603                 } else {
11604                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11605                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11606                 }
11607
11608                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11609                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11610                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11611                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11612                 }
11613
11614                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11615                         (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11616                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11617
11618                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11619                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11620                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11621
11622                 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11623                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11624                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11625
11626                 if (cfg2 & (1 << 17))
11627                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11628
11629                 /* SerDes signal pre-emphasis in register 0x590 is set
11630                  * by the bootcode if bit 18 is set. */
11631                 if (cfg2 & (1 << 18))
11632                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11633
11634                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11635                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX &&
11636                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11637                         tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11638
11639                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11640                         u32 cfg3;
11641
11642                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11643                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11644                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11645                 }
11646
11647                 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11648                         tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11649                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11650                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11651                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11652                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11653         }
11654 done:
11655         device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11656         device_set_wakeup_enable(&tp->pdev->dev,
11657                                  tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11658 }
11659
11660 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11661 {
11662         int i;
11663         u32 val;
11664
11665         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11666         tw32(OTP_CTRL, cmd);
11667
11668         /* Wait for up to 1 ms for command to execute. */
11669         for (i = 0; i < 100; i++) {
11670                 val = tr32(OTP_STATUS);
11671                 if (val & OTP_STATUS_CMD_DONE)
11672                         break;
11673                 udelay(10);
11674         }
11675
11676         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11677 }
11678
11679 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11680  * configuration is a 32-bit value that straddles the alignment boundary.
11681  * We do two 32-bit reads and then shift and merge the results.
11682  */
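/* Concretely, the upper 16 bits of the returned value come from the low
 * half of the word read at OTP_ADDRESS_MAGIC1 and the lower 16 bits from
 * the high half of the word read at OTP_ADDRESS_MAGIC2.
 */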
11683 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11684 {
11685         u32 bhalf_otp, thalf_otp;
11686
11687         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11688
11689         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11690                 return 0;
11691
11692         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11693
11694         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11695                 return 0;
11696
11697         thalf_otp = tr32(OTP_READ_DATA);
11698
11699         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11700
11701         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11702                 return 0;
11703
11704         bhalf_otp = tr32(OTP_READ_DATA);
11705
11706         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11707 }
11708
11709 static int __devinit tg3_phy_probe(struct tg3 *tp)
11710 {
11711         u32 hw_phy_id_1, hw_phy_id_2;
11712         u32 hw_phy_id, hw_phy_id_masked;
11713         int err;
11714
11715         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11716                 return tg3_phy_init(tp);
11717
11718         /* Reading the PHY ID register can conflict with ASF
11719          * firmware access to the PHY hardware.
11720          */
11721         err = 0;
11722         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11723             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11724                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11725         } else {
11726                 /* Now read the physical PHY_ID from the chip and verify
11727                  * that it is sane.  If it doesn't look good, we fall back
11728                  * to the PHY_ID found in the eeprom area or, failing
11729                  * that, the hard-coded subsystem device table.
11730                  */
11731                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11732                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11733
11734                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11735                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11736                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
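		/* The three shifts above fold the two MII ID words into tg3's
		 * internal PHY_ID layout.  With hypothetical register values,
		 * for illustration only:
		 *   hw_phy_id_1 = 0x0020 -> (0x0020 & 0xffff) << 10 = 0x00008000
		 *   hw_phy_id_2 = 0x5c60 -> (0x5c60 & 0xfc00) << 16 = 0x5c000000
		 *                           (0x5c60 & 0x03ff) <<  0 = 0x00000060
		 *   hw_phy_id              = 0x5c008060
		 */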
11737
11738                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11739         }
11740
11741         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11742                 tp->phy_id = hw_phy_id;
11743                 if (hw_phy_id_masked == PHY_ID_BCM8002)
11744                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11745                 else
11746                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11747         } else {
11748                 if (tp->phy_id != PHY_ID_INVALID) {
11749                         /* Do nothing, phy ID already set up in
11750                          * tg3_get_eeprom_hw_cfg().
11751                          */
11752                 } else {
11753                         struct subsys_tbl_ent *p;
11754
11755                         /* No eeprom signature?  Try the hardcoded
11756                          * subsys device table.
11757                          */
11758                         p = lookup_by_subsys(tp);
11759                         if (!p)
11760                                 return -ENODEV;
11761
11762                         tp->phy_id = p->phy_id;
11763                         if (!tp->phy_id ||
11764                             tp->phy_id == PHY_ID_BCM8002)
11765                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11766                 }
11767         }
11768
11769         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11770             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11771             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11772                 u32 bmsr, adv_reg, tg3_ctrl, mask;
11773
11774                 tg3_readphy(tp, MII_BMSR, &bmsr);
11775                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11776                     (bmsr & BMSR_LSTATUS))
11777                         goto skip_phy_reset;
11778
11779                 err = tg3_phy_reset(tp);
11780                 if (err)
11781                         return err;
11782
11783                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11784                            ADVERTISE_100HALF | ADVERTISE_100FULL |
11785                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11786                 tg3_ctrl = 0;
11787                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11788                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11789                                     MII_TG3_CTRL_ADV_1000_FULL);
11790                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11791                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11792                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11793                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
11794                 }
11795
11796                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11797                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11798                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11799                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11800                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11801
11802                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11803                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11804
11805                         tg3_writephy(tp, MII_BMCR,
11806                                      BMCR_ANENABLE | BMCR_ANRESTART);
11807                 }
11808                 tg3_phy_set_wirespeed(tp);
11809
11810                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11811                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11812                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11813         }
11814
11815 skip_phy_reset:
11816         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11817                 err = tg3_init_5401phy_dsp(tp);
11818                 if (err)
11819                         return err;
11820         }
11821
11822         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11823                 err = tg3_init_5401phy_dsp(tp);
11824         }
11825
11826         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11827                 tp->link_config.advertising =
11828                         (ADVERTISED_1000baseT_Half |
11829                          ADVERTISED_1000baseT_Full |
11830                          ADVERTISED_Autoneg |
11831                          ADVERTISED_FIBRE);
11832         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11833                 tp->link_config.advertising &=
11834                         ~(ADVERTISED_1000baseT_Half |
11835                           ADVERTISED_1000baseT_Full);
11836
11837         return err;
11838 }
11839
11840 static void __devinit tg3_read_partno(struct tg3 *tp)
11841 {
11842         unsigned char vpd_data[256];
11843         unsigned int i;
11844         u32 magic;
11845
11846         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11847                 goto out_not_found;
11848
11849         if (magic == TG3_EEPROM_MAGIC) {
11850                 for (i = 0; i < 256; i += 4) {
11851                         u32 tmp;
11852
11853                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11854                                 goto out_not_found;
11855
11856                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11857                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11858                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11859                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11860                 }
11861         } else {
11862                 int vpd_cap;
11863
11864                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11865                 for (i = 0; i < 256; i += 4) {
11866                         u32 tmp, j = 0;
11867                         __le32 v;
11868                         u16 tmp16;
11869
11870                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11871                                               i);
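			/* Per the PCI VPD capability, writing the address with
			 * bit 15 (the F flag) clear starts a read; the device
			 * sets bit 15 once the data register holds valid data,
			 * which is what the poll below waits for.
			 */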
11872                         while (j++ < 100) {
11873                                 pci_read_config_word(tp->pdev, vpd_cap +
11874                                                      PCI_VPD_ADDR, &tmp16);
11875                                 if (tmp16 & 0x8000)
11876                                         break;
11877                                 msleep(1);
11878                         }
11879                         if (!(tmp16 & 0x8000))
11880                                 goto out_not_found;
11881
11882                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11883                                               &tmp);
11884                         v = cpu_to_le32(tmp);
11885                         memcpy(&vpd_data[i], &v, 4);
11886                 }
11887         }
11888
11889         /* Now parse and find the part number. */
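	/* A minimal, hypothetical VPD image that this loop can walk
	 * (tag byte, 16-bit little-endian length, then data):
	 *   0x82 0x08 0x00  "BCM95721"            - Identifier String, skipped
	 *   0x90 0x0b 0x00                        - VPD-R resource
	 *        'P' 'N' 0x08  "BCM95721"         - part number keyword, copied out
	 */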
11890         for (i = 0; i < 254; ) {
11891                 unsigned char val = vpd_data[i];
11892                 unsigned int block_end;
11893
11894                 if (val == 0x82 || val == 0x91) {
11895                         i = (i + 3 +
11896                              (vpd_data[i + 1] +
11897                               (vpd_data[i + 2] << 8)));
11898                         continue;
11899                 }
11900
11901                 if (val != 0x90)
11902                         goto out_not_found;
11903
11904                 block_end = (i + 3 +
11905                              (vpd_data[i + 1] +
11906                               (vpd_data[i + 2] << 8)));
11907                 i += 3;
11908
11909                 if (block_end > 256)
11910                         goto out_not_found;
11911
11912                 while (i < (block_end - 2)) {
11913                         if (vpd_data[i + 0] == 'P' &&
11914                             vpd_data[i + 1] == 'N') {
11915                                 int partno_len = vpd_data[i + 2];
11916
11917                                 i += 3;
11918                                 if (partno_len > 24 || (partno_len + i) > 256)
11919                                         goto out_not_found;
11920
11921                                 memcpy(tp->board_part_number,
11922                                        &vpd_data[i], partno_len);
11923
11924                                 /* Success. */
11925                                 return;
11926                         }
11927                         i += 3 + vpd_data[i + 2];
11928                 }
11929
11930                 /* Part number not found. */
11931                 goto out_not_found;
11932         }
11933
11934 out_not_found:
11935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11936                 strcpy(tp->board_part_number, "BCM95906");
11937         else
11938                 strcpy(tp->board_part_number, "none");
11939 }
11940
11941 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11942 {
11943         u32 val;
11944
11945         if (tg3_nvram_read_swab(tp, offset, &val) ||
11946             (val & 0xfc000000) != 0x0c000000 ||
11947             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11948             val != 0)
11949                 return 0;
11950
11951         return 1;
11952 }
11953
11954 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11955 {
11956         u32 val, offset, start;
11957         u32 ver_offset;
11958         int i, bcnt;
11959
11960         if (tg3_nvram_read_swab(tp, 0, &val))
11961                 return;
11962
11963         if (val != TG3_EEPROM_MAGIC)
11964                 return;
11965
11966         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11967             tg3_nvram_read_swab(tp, 0x4, &start))
11968                 return;
11969
11970         offset = tg3_nvram_logical_addr(tp, offset);
11971
11972         if (!tg3_fw_img_is_valid(tp, offset) ||
11973             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11974                 return;
11975
11976         offset = offset + ver_offset - start;
11977         for (i = 0; i < 16; i += 4) {
11978                 __le32 v;
11979                 if (tg3_nvram_read_le(tp, offset + i, &v))
11980                         return;
11981
11982                 memcpy(tp->fw_ver + i, &v, 4);
11983         }
11984
11985         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11986              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11987                 return;
11988
11989         for (offset = TG3_NVM_DIR_START;
11990              offset < TG3_NVM_DIR_END;
11991              offset += TG3_NVM_DIRENT_SIZE) {
11992                 if (tg3_nvram_read_swab(tp, offset, &val))
11993                         return;
11994
11995                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11996                         break;
11997         }
11998
11999         if (offset == TG3_NVM_DIR_END)
12000                 return;
12001
12002         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12003                 start = 0x08000000;
12004         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
12005                 return;
12006
12007         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
12008             !tg3_fw_img_is_valid(tp, offset) ||
12009             tg3_nvram_read_swab(tp, offset + 8, &val))
12010                 return;
12011
12012         offset += val - start;
12013
12014         bcnt = strlen(tp->fw_ver);
12015
12016         tp->fw_ver[bcnt++] = ',';
12017         tp->fw_ver[bcnt++] = ' ';
12018
12019         for (i = 0; i < 4; i++) {
12020                 __le32 v;
12021                 if (tg3_nvram_read_le(tp, offset, &v))
12022                         return;
12023
12024                 offset += sizeof(v);
12025
12026                 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
12027                         memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
12028                         break;
12029                 }
12030
12031                 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
12032                 bcnt += sizeof(v);
12033         }
12034
12035         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12036 }
12037
12038 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12039
12040 static int __devinit tg3_get_invariants(struct tg3 *tp)
12041 {
12042         static struct pci_device_id write_reorder_chipsets[] = {
12043                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12044                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12045                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12046                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12047                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12048                              PCI_DEVICE_ID_VIA_8385_0) },
12049                 { },
12050         };
12051         u32 misc_ctrl_reg;
12052         u32 cacheline_sz_reg;
12053         u32 pci_state_reg, grc_misc_cfg;
12054         u32 val;
12055         u16 pci_cmd;
12056         int err;
12057
12058         /* Force memory write invalidate off.  If we leave it on,
12059          * then on 5700_BX chips we have to enable a workaround.
12060          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12061          * to match the cacheline size.  The Broadcom driver has this
12062          * workaround but turns MWI off all the time, so it never uses
12063          * it.  This seems to suggest that the workaround is insufficient.
12064          */
12065         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12066         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12067         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12068
12069         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12070          * has the register indirect write enable bit set before
12071          * we try to access any of the MMIO registers.  It is also
12072          * critical that the PCI-X hw workaround situation is decided
12073          * before that as well.
12074          */
12075         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12076                               &misc_ctrl_reg);
12077
12078         tp->pci_chip_rev_id = (misc_ctrl_reg >>
12079                                MISC_HOST_CTRL_CHIPREV_SHIFT);
12080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12081                 u32 prod_id_asic_rev;
12082
12083                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12084                                       &prod_id_asic_rev);
12085                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
12086         }
12087
12088         /* Wrong chip ID in 5752 A0. This code can be removed later
12089          * as A0 is not in production.
12090          */
12091         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12092                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12093
12094         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12095          * we need to disable memory and use config cycles
12096          * only to access all registers. The 5702/03 chips
12097          * can mistakenly decode the special cycles from the
12098          * ICH chipsets as memory write cycles, causing corruption
12099          * of register and memory space. Only certain ICH bridges
12100          * will drive special cycles with non-zero data during the
12101          * address phase which can fall within the 5703's address
12102          * range. This is not an ICH bug as the PCI spec allows
12103          * non-zero address during special cycles. However, only
12104          * these ICH bridges are known to drive non-zero addresses
12105          * during special cycles.
12106          *
12107          * Since special cycles do not cross PCI bridges, we only
12108          * enable this workaround if the 5703 is on the secondary
12109          * bus of these ICH bridges.
12110          */
12111         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12112             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12113                 static struct tg3_dev_id {
12114                         u32     vendor;
12115                         u32     device;
12116                         u32     rev;
12117                 } ich_chipsets[] = {
12118                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12119                           PCI_ANY_ID },
12120                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12121                           PCI_ANY_ID },
12122                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12123                           0xa },
12124                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12125                           PCI_ANY_ID },
12126                         { },
12127                 };
12128                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12129                 struct pci_dev *bridge = NULL;
12130
12131                 while (pci_id->vendor != 0) {
12132                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
12133                                                 bridge);
12134                         if (!bridge) {
12135                                 pci_id++;
12136                                 continue;
12137                         }
12138                         if (pci_id->rev != PCI_ANY_ID) {
12139                                 if (bridge->revision > pci_id->rev)
12140                                         continue;
12141                         }
12142                         if (bridge->subordinate &&
12143                             (bridge->subordinate->number ==
12144                              tp->pdev->bus->number)) {
12145
12146                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12147                                 pci_dev_put(bridge);
12148                                 break;
12149                         }
12150                 }
12151         }
12152
12153         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12154                 static struct tg3_dev_id {
12155                         u32     vendor;
12156                         u32     device;
12157                 } bridge_chipsets[] = {
12158                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12159                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12160                         { },
12161                 };
12162                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12163                 struct pci_dev *bridge = NULL;
12164
12165                 while (pci_id->vendor != 0) {
12166                         bridge = pci_get_device(pci_id->vendor,
12167                                                 pci_id->device,
12168                                                 bridge);
12169                         if (!bridge) {
12170                                 pci_id++;
12171                                 continue;
12172                         }
12173                         if (bridge->subordinate &&
12174                             (bridge->subordinate->number <=
12175                              tp->pdev->bus->number) &&
12176                             (bridge->subordinate->subordinate >=
12177                              tp->pdev->bus->number)) {
12178                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12179                                 pci_dev_put(bridge);
12180                                 break;
12181                         }
12182                 }
12183         }
12184
12185         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12186          * DMA addresses > 40-bit. This bridge may have additional
12187          * 57xx devices behind it in some 4-port NIC designs, for example.
12188          * Any tg3 device found behind the bridge will also need the 40-bit
12189          * DMA workaround.
12190          */
12191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12192             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12193                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12194                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12195                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12196         }
12197         else {
12198                 struct pci_dev *bridge = NULL;
12199
12200                 do {
12201                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12202                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12203                                                 bridge);
12204                         if (bridge && bridge->subordinate &&
12205                             (bridge->subordinate->number <=
12206                              tp->pdev->bus->number) &&
12207                             (bridge->subordinate->subordinate >=
12208                              tp->pdev->bus->number)) {
12209                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12210                                 pci_dev_put(bridge);
12211                                 break;
12212                         }
12213                 } while (bridge);
12214         }
12215
12216         /* Initialize misc host control in PCI block. */
12217         tp->misc_host_ctrl |= (misc_ctrl_reg &
12218                                MISC_HOST_CTRL_CHIPREV);
12219         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12220                                tp->misc_host_ctrl);
12221
12222         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12223                               &cacheline_sz_reg);
12224
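	/* The dword read above is the standard PCI config dword at offset
	 * 0x0c; the four fields unpacked below are cache line size, latency
	 * timer, header type and BIST, in bytes 0 through 3 respectively.
	 */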
12225         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12226         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12227         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12228         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12229
12230         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12231             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12232                 tp->pdev_peer = tg3_find_peer(tp);
12233
12234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12235             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12236             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12237             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12238             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12239             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12240             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12241             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12242             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12243                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12244
12245         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12246             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12247                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12248
12249         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12250                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12251                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12252                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12253                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12254                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12255                      tp->pdev_peer == tp->pdev))
12256                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12257
12258                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12259                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12260                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12261                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12262                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12263                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12264                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12265                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12266                 } else {
12267                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12268                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12269                                 ASIC_REV_5750 &&
12270                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12271                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12272                 }
12273         }
12274
12275         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12276              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12277                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12278
12279         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12280                               &pci_state_reg);
12281
12282         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12283         if (tp->pcie_cap != 0) {
12284                 u16 lnkctl;
12285
12286                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12287
12288                 pcie_set_readrq(tp->pdev, 4096);
12289
12290                 pci_read_config_word(tp->pdev,
12291                                      tp->pcie_cap + PCI_EXP_LNKCTL,
12292                                      &lnkctl);
12293                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12294                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12295                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12296                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12297                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12298                                 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12299                 }
12300         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12301                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12302         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12303                    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12304                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12305                 if (!tp->pcix_cap) {
12306                         printk(KERN_ERR PFX "Cannot find PCI-X "
12307                                             "capability, aborting.\n");
12308                         return -EIO;
12309                 }
12310
12311                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12312                         tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12313         }
12314
12315         /* If we have an AMD 762 or VIA K8T800 chipset, write
12316          * reordering to the mailbox registers done by the host
12317          * controller can cause major trouble.  We read back from
12318          * every mailbox register write to force the writes to be
12319          * posted to the chip in order.
12320          */
12321         if (pci_dev_present(write_reorder_chipsets) &&
12322             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12323                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
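	/* Roughly, this flag selects the flush-on-write mailbox path set up
	 * further below (see tg3_write_flush_reg32()), i.e. the classic
	 * posted-write workaround:
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);		read back to force posting
	 */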
12324
12325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12326             tp->pci_lat_timer < 64) {
12327                 tp->pci_lat_timer = 64;
12328
12329                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12330                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12331                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12332                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12333
12334                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12335                                        cacheline_sz_reg);
12336         }
12337
12338         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12339                 /* 5700 BX chips need to have their TX producer index
12340                  * mailboxes written twice to work around a bug.
12341                  */
12342                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12343
12344                 /* If we are in PCI-X mode, enable register write workaround.
12345                  *
12346                  * The workaround is to use indirect register accesses
12347                  * for all chip writes not to mailbox registers.
12348                  */
12349                 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12350                         u32 pm_reg;
12351
12352                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12353
12354                         /* The chip can have its power management PCI config
12355                          * space registers clobbered due to this bug.
12356                          * So explicitly force the chip into D0 here.
12357                          */
12358                         pci_read_config_dword(tp->pdev,
12359                                               tp->pm_cap + PCI_PM_CTRL,
12360                                               &pm_reg);
12361                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12362                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12363                         pci_write_config_dword(tp->pdev,
12364                                                tp->pm_cap + PCI_PM_CTRL,
12365                                                pm_reg);
12366
12367                         /* Also, force SERR#/PERR# in PCI command. */
12368                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12369                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12370                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12371                 }
12372         }
12373
12374         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12375                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12376         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12377                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12378
12379         /* Chip-specific fixup from Broadcom driver */
12380         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12381             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12382                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12383                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12384         }
12385
12386         /* Default fast path register access methods */
12387         tp->read32 = tg3_read32;
12388         tp->write32 = tg3_write32;
12389         tp->read32_mbox = tg3_read32;
12390         tp->write32_mbox = tg3_write32;
12391         tp->write32_tx_mbox = tg3_write32;
12392         tp->write32_rx_mbox = tg3_write32;
12393
12394         /* Various workaround register access methods */
12395         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12396                 tp->write32 = tg3_write_indirect_reg32;
12397         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12398                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12399                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12400                 /*
12401                  * Back-to-back register writes can cause problems on these
12402                  * chips; the workaround is to read back all reg writes
12403                  * except those to mailbox regs.
12404                  *
12405                  * See tg3_write_indirect_reg32().
12406                  */
12407                 tp->write32 = tg3_write_flush_reg32;
12408         }
12409
12410
12411         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12412             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12413                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12414                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12415                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12416         }
12417
12418         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12419                 tp->read32 = tg3_read_indirect_reg32;
12420                 tp->write32 = tg3_write_indirect_reg32;
12421                 tp->read32_mbox = tg3_read_indirect_mbox;
12422                 tp->write32_mbox = tg3_write_indirect_mbox;
12423                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12424                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12425
12426                 iounmap(tp->regs);
12427                 tp->regs = NULL;
12428
12429                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12430                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12431                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12432         }
12433         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12434                 tp->read32_mbox = tg3_read32_mbox_5906;
12435                 tp->write32_mbox = tg3_write32_mbox_5906;
12436                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12437                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12438         }
12439
12440         if (tp->write32 == tg3_write_indirect_reg32 ||
12441             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12442              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12443               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12444                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12445
12446         /* Get eeprom hw config before calling tg3_set_power_state().
12447          * In particular, the TG3_FLG2_IS_NIC flag must be
12448          * determined before calling tg3_set_power_state() so that
12449          * we know whether or not to switch out of Vaux power.
12450          * When the flag is set, it means that GPIO1 is used for eeprom
12451          * write protect and also implies that it is a LOM where GPIOs
12452          * are not used to switch power.
12453          */
12454         tg3_get_eeprom_hw_cfg(tp);
12455
12456         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12457                 /* Allow reads and writes to the
12458                  * APE register and memory space.
12459                  */
12460                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12461                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12462                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12463                                        pci_state_reg);
12464         }
12465
12466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12467             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12469                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12470
12471         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12472          * GPIO1 driven high will bring 5700's external PHY out of reset.
12473          * It is also used as eeprom write protect on LOMs.
12474          */
12475         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12476         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12477             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12478                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12479                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12480         /* Unused GPIO3 must be driven as output on 5752 because there
12481          * are no pull-up resistors on unused GPIO pins.
12482          */
12483         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12484                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12485
12486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12487                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12488
12489         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12490                 /* Turn off the debug UART. */
12491                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12492                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12493                         /* Keep VMain power. */
12494                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12495                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12496         }
12497
12498         /* Force the chip into D0. */
12499         err = tg3_set_power_state(tp, PCI_D0);
12500         if (err) {
12501                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12502                        pci_name(tp->pdev));
12503                 return err;
12504         }
12505
12506         /* 5700 B0 chips do not support checksumming correctly due
12507          * to hardware bugs.
12508          */
12509         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12510                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12511
12512         /* Derive initial jumbo mode from MTU assigned in
12513          * ether_setup() via the alloc_etherdev() call
12514          */
12515         if (tp->dev->mtu > ETH_DATA_LEN &&
12516             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12517                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12518
12519         /* Determine WakeOnLan speed to use. */
12520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12521             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12522             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12523             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12524                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12525         } else {
12526                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12527         }
12528
12529         /* A few boards don't want Ethernet@WireSpeed phy feature */
12530         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12531             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12532              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12533              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12534             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12535             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12536                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12537
12538         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12539             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12540                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12541         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12542                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12543
12544         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12545                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12546                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12547                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12548                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12549                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12550                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12551                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12552                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12553                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12554                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12555                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12556                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12557         }
12558
12559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12560             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12561                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12562                 if (tp->phy_otp == 0)
12563                         tp->phy_otp = TG3_OTP_DEFAULT;
12564         }
12565
12566         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12567                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12568         else
12569                 tp->mi_mode = MAC_MI_MODE_BASE;
12570
12571         tp->coalesce_mode = 0;
12572         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12573             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12574                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12575
12576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12577                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12578
12579         err = tg3_mdio_init(tp);
12580         if (err)
12581                 return err;
12582
12583         /* Initialize data/descriptor byte/word swapping. */
12584         val = tr32(GRC_MODE);
12585         val &= GRC_MODE_HOST_STACKUP;
12586         tw32(GRC_MODE, val | tp->grc_mode);
12587
12588         tg3_switch_clocks(tp);
12589
12590         /* Clear this out for sanity. */
12591         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12592
12593         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12594                               &pci_state_reg);
12595         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12596             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12597                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12598
12599                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12600                     chiprevid == CHIPREV_ID_5701_B0 ||
12601                     chiprevid == CHIPREV_ID_5701_B2 ||
12602                     chiprevid == CHIPREV_ID_5701_B5) {
12603                         void __iomem *sram_base;
12604
12605                         /* Write some dummy words into the SRAM status block
12606                          * area and see if it reads back correctly.  If the return
12607                          * value is bad, force-enable the PCIX workaround.
12608                          */
12609                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12610
12611                         writel(0x00000000, sram_base);
12612                         writel(0x00000000, sram_base + 4);
12613                         writel(0xffffffff, sram_base + 4);
12614                         if (readl(sram_base) != 0x00000000)
12615                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12616                 }
12617         }
12618
12619         udelay(50);
12620         tg3_nvram_init(tp);
12621
12622         grc_misc_cfg = tr32(GRC_MISC_CFG);
12623         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12624
12625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12626             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12627              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12628                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12629
12630         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12631             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12632                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12633         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12634                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12635                                       HOSTCC_MODE_CLRTICK_TXBD);
12636
12637                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12638                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12639                                        tp->misc_host_ctrl);
12640         }
12641
12642         /* Preserve the APE MAC_MODE bits */
12643         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12644                 tp->mac_mode = tr32(MAC_MODE) |
12645                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12646         else
12647                 tp->mac_mode = TG3_DEF_MAC_MODE;
12648
12649         /* these are limited to 10/100 only */
12650         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12651              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12652             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12653              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12654              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12655               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12656               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12657             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12658              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12659               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12660               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12662                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12663
12664         err = tg3_phy_probe(tp);
12665         if (err) {
12666                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12667                        pci_name(tp->pdev), err);
12668                 /* ... but do not return immediately ... */
12669                 tg3_mdio_fini(tp);
12670         }
12671
12672         tg3_read_partno(tp);
12673         tg3_read_fw_ver(tp);
12674
12675         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12676                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12677         } else {
12678                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12679                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12680                 else
12681                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12682         }
12683
12684         /* 5700 {AX,BX} chips have a broken status block link
12685          * change bit implementation, so we must use the
12686          * status register in those cases.
12687          */
12688         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12689                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12690         else
12691                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12692
12693         /* The led_ctrl is set during tg3_phy_probe; here we might
12694          * have to force the link status polling mechanism based
12695          * upon subsystem IDs.
12696          */
12697         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12698             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12699             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12700                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12701                                   TG3_FLAG_USE_LINKCHG_REG);
12702         }
12703
12704         /* For all SERDES we poll the MAC status register. */
12705         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12706                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12707         else
12708                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12709
12710         tp->rx_offset = NET_IP_ALIGN;
12711         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12712             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12713                 tp->rx_offset = 0;
12714
12715         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12716
12717         /* Increment the rx prod index on the rx std ring by at most
12718          * 8 for these chips to workaround hw errata.
12719          */
12720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12721             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12722             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12723                 tp->rx_std_max_post = 8;
12724
12725         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12726                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12727                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12728
12729         return err;
12730 }
12731
12732 #ifdef CONFIG_SPARC
12733 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12734 {
12735         struct net_device *dev = tp->dev;
12736         struct pci_dev *pdev = tp->pdev;
12737         struct device_node *dp = pci_device_to_OF_node(pdev);
12738         const unsigned char *addr;
12739         int len;
12740
12741         addr = of_get_property(dp, "local-mac-address", &len);
12742         if (addr && len == 6) {
12743                 memcpy(dev->dev_addr, addr, 6);
12744                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12745                 return 0;
12746         }
12747         return -ENODEV;
12748 }
12749
12750 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12751 {
12752         struct net_device *dev = tp->dev;
12753
12754         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12755         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12756         return 0;
12757 }
12758 #endif
12759
12760 static int __devinit tg3_get_device_address(struct tg3 *tp)
12761 {
12762         struct net_device *dev = tp->dev;
12763         u32 hi, lo, mac_offset;
12764         int addr_ok = 0;
12765
12766 #ifdef CONFIG_SPARC
12767         if (!tg3_get_macaddr_sparc(tp))
12768                 return 0;
12769 #endif
12770
12771         mac_offset = 0x7c;
12772         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12773             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12774                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12775                         mac_offset = 0xcc;
12776                 if (tg3_nvram_lock(tp))
12777                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12778                 else
12779                         tg3_nvram_unlock(tp);
12780         }
12781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12782                 mac_offset = 0x10;
12783
12784         /* First try to get it from MAC address mailbox. */
12785         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
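	/* 0x484b is ASCII 'H','K', presumably the signature the bootcode
	 * writes to mark a valid MAC address in this SRAM mailbox.
	 */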
12786         if ((hi >> 16) == 0x484b) {
12787                 dev->dev_addr[0] = (hi >>  8) & 0xff;
12788                 dev->dev_addr[1] = (hi >>  0) & 0xff;
12789
12790                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12791                 dev->dev_addr[2] = (lo >> 24) & 0xff;
12792                 dev->dev_addr[3] = (lo >> 16) & 0xff;
12793                 dev->dev_addr[4] = (lo >>  8) & 0xff;
12794                 dev->dev_addr[5] = (lo >>  0) & 0xff;
12795
12796                 /* Some old bootcode may report a 0 MAC address in SRAM */
12797                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12798         }
12799         if (!addr_ok) {
12800                 /* Next, try NVRAM. */
12801                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12802                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12803                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
12804                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
12805                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
12806                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
12807                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
12808                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
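			/* Byte-order illustration for the assignments above,
			 * with made-up NVRAM words:
			 *   hi = 0xbbaa0000, lo = 0xffeeddcc
			 *   gives dev_addr = aa:bb:cc:dd:ee:ff
			 */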
12809                 }
12810                 /* Finally just fetch it out of the MAC control regs. */
12811                 else {
12812                         hi = tr32(MAC_ADDR_0_HIGH);
12813                         lo = tr32(MAC_ADDR_0_LOW);
12814
12815                         dev->dev_addr[5] = lo & 0xff;
12816                         dev->dev_addr[4] = (lo >> 8) & 0xff;
12817                         dev->dev_addr[3] = (lo >> 16) & 0xff;
12818                         dev->dev_addr[2] = (lo >> 24) & 0xff;
12819                         dev->dev_addr[1] = hi & 0xff;
12820                         dev->dev_addr[0] = (hi >> 8) & 0xff;
12821                 }
12822         }
12823
12824         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12825 #ifdef CONFIG_SPARC
12826                 if (!tg3_get_default_macaddr_sparc(tp))
12827                         return 0;
12828 #endif
12829                 return -EINVAL;
12830         }
12831         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12832         return 0;
12833 }
12834
12835 #define BOUNDARY_SINGLE_CACHELINE       1
12836 #define BOUNDARY_MULTI_CACHELINE        2
12837
12838 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12839 {
12840         int cacheline_size;
12841         u8 byte;
12842         int goal;
12843
12844         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12845         if (byte == 0)
12846                 cacheline_size = 1024;
12847         else
12848                 cacheline_size = (int) byte * 4;
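	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, so e.g. a value
	 * of 0x10 (16 dwords) means a 64-byte cache line; a value of 0 is
	 * treated as "not configured" and the code falls back to 1024 bytes.
	 */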
12849
12850         /* On 5703 and later chips, the boundary bits have no
12851          * effect.
12852          */
12853         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12854             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12855             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12856                 goto out;
12857
12858 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12859         goal = BOUNDARY_MULTI_CACHELINE;
12860 #else
12861 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12862         goal = BOUNDARY_SINGLE_CACHELINE;
12863 #else
12864         goal = 0;
12865 #endif
12866 #endif
12867
12868         if (!goal)
12869                 goto out;
12870
12871         /* PCI controllers on most RISC systems tend to disconnect
12872          * when a device tries to burst across a cache-line boundary.
12873          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12874          *
12875          * Unfortunately, for PCI-E there are only limited
12876          * write-side controls for this, and thus for reads
12877          * we will still get the disconnects.  We'll also waste
12878          * these PCI cycles for both read and write for chips
12879          * other than 5700 and 5701 which do not implement the
12880          * boundary bits.
12881          */
12882         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12883             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12884                 switch (cacheline_size) {
12885                 case 16:
12886                 case 32:
12887                 case 64:
12888                 case 128:
12889                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12890                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12891                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12892                         } else {
12893                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12894                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12895                         }
12896                         break;
12897
12898                 case 256:
12899                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12900                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12901                         break;
12902
12903                 default:
12904                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12905                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12906                         break;
12907                 }
12908         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12909                 switch (cacheline_size) {
12910                 case 16:
12911                 case 32:
12912                 case 64:
12913                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12914                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12915                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12916                                 break;
12917                         }
12918                         /* fallthrough */
12919                 case 128:
12920                 default:
12921                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12922                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12923                         break;
12924                 }
12925         } else {
12926                 switch (cacheline_size) {
12927                 case 16:
12928                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12929                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12930                                         DMA_RWCTRL_WRITE_BNDRY_16);
12931                                 break;
12932                         }
12933                         /* fallthrough */
12934                 case 32:
12935                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12936                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12937                                         DMA_RWCTRL_WRITE_BNDRY_32);
12938                                 break;
12939                         }
12940                         /* fallthrough */
12941                 case 64:
12942                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12943                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12944                                         DMA_RWCTRL_WRITE_BNDRY_64);
12945                                 break;
12946                         }
12947                         /* fallthrough */
12948                 case 128:
12949                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12950                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12951                                         DMA_RWCTRL_WRITE_BNDRY_128);
12952                                 break;
12953                         }
12954                         /* fallthrough */
12955                 case 256:
12956                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12957                                 DMA_RWCTRL_WRITE_BNDRY_256);
12958                         break;
12959                 case 512:
12960                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12961                                 DMA_RWCTRL_WRITE_BNDRY_512);
12962                         break;
12963                 case 1024:
12964                 default:
12965                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12966                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12967                         break;
12968                 }
12969         }
12970
12971 out:
12972         return val;
12973 }
12974
12975 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12976 {
12977         struct tg3_internal_buffer_desc test_desc;
12978         u32 sram_dma_descs;
12979         int i, ret;
12980
12981         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12982
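        /* Put the DMA test machinery into a known state: clear the
         * completion FIFO enqueue/dequeue registers that are polled below,
         * the read/write DMA status registers, the buffer manager mode
         * register and the FTQ reset register.
         */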
12983         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12984         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12985         tw32(RDMAC_STATUS, 0);
12986         tw32(WDMAC_STATUS, 0);
12987
12988         tw32(BUFMGR_MODE, 0);
12989         tw32(FTQ_RESET, 0);
12990
12991         test_desc.addr_hi = ((u64) buf_dma) >> 32;
12992         test_desc.addr_lo = buf_dma & 0xffffffff;
12993         test_desc.nic_mbuf = 0x00002100;
12994         test_desc.len = size;
12995
12996         /*
12997          * HP ZX1 systems were seeing test failures for 5701 cards running
12998          * at 33MHz the *second* time the tg3 driver was loaded after an
12999          * initial scan.
13000          *
13001          * Broadcom tells me:
13002          *   ...the DMA engine is connected to the GRC block and a DMA
13003          *   reset may affect the GRC block in some unpredictable way...
13004          *   The behavior of resets to individual blocks has not been tested.
13005          *
13006          * Broadcom noted the GRC reset will also reset all sub-components.
13007          */
13008         if (to_device) {
13009                 test_desc.cqid_sqid = (13 << 8) | 2;
13010
13011                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13012                 udelay(40);
13013         } else {
13014                 test_desc.cqid_sqid = (16 << 8) | 7;
13015
13016                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13017                 udelay(40);
13018         }
13019         test_desc.flags = 0x00000005;
13020
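        /* Copy the descriptor into NIC SRAM one 32-bit word at a time via
         * the indirect PCI memory window (window base address register
         * plus data register), then park the window back at offset 0.
         */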
13021         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13022                 u32 val;
13023
13024                 val = *(((u32 *)&test_desc) + i);
13025                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13026                                        sram_dma_descs + (i * sizeof(u32)));
13027                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13028         }
13029         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13030
13031         if (to_device) {
13032                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13033         } else {
13034                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13035         }
13036
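        /* Wait up to roughly 4ms (40 polls, 100us apart) for the
         * descriptor's SRAM address to show up in the completion FIFO,
         * which indicates the test DMA finished.
         */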
13037         ret = -ENODEV;
13038         for (i = 0; i < 40; i++) {
13039                 u32 val;
13040
13041                 if (to_device)
13042                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13043                 else
13044                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13045                 if ((val & 0xffff) == sram_dma_descs) {
13046                         ret = 0;
13047                         break;
13048                 }
13049
13050                 udelay(100);
13051         }
13052
13053         return ret;
13054 }
13055
13056 #define TEST_BUFFER_SIZE        0x2000
13057
13058 static int __devinit tg3_test_dma(struct tg3 *tp)
13059 {
13060         dma_addr_t buf_dma;
13061         u32 *buf, saved_dma_rwctrl;
13062         int ret;
13063
13064         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13065         if (!buf) {
13066                 ret = -ENOMEM;
13067                 goto out_nofree;
13068         }
13069
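        /* Start from a baseline DMA read/write control value.  The field
         * names suggest these two values program the PCI commands the chip
         * issues for DMA writes (0x7) and reads (0x6); tg3_calc_dma_bndry()
         * then folds in a burst-boundary setting suited to this host.
         */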
13070         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13071                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13072
13073         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13074
13075         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13076                 /* DMA read watermark not used on PCIE */
13077                 tp->dma_rwctrl |= 0x00180000;
13078         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13079                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13080                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13081                         tp->dma_rwctrl |= 0x003f0000;
13082                 else
13083                         tp->dma_rwctrl |= 0x003f000f;
13084         } else {
13085                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13086                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13087                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13088                         u32 read_water = 0x7;
13089
13090                         /* If the 5704 is behind the EPB bridge, we can
13091                          * do the less restrictive ONE_DMA workaround for
13092                          * better performance.
13093                          */
13094                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13095                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13096                                 tp->dma_rwctrl |= 0x8000;
13097                         else if (ccval == 0x6 || ccval == 0x7)
13098                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13099
13100                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13101                                 read_water = 4;
13102                         /* Set bit 23 to enable PCIX hw bug fix */
13103                         tp->dma_rwctrl |=
13104                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13105                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13106                                 (1 << 23);
13107                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13108                         /* 5780 always in PCIX mode */
13109                         tp->dma_rwctrl |= 0x00144000;
13110                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13111                         /* 5714 always in PCIX mode */
13112                         tp->dma_rwctrl |= 0x00148000;
13113                 } else {
13114                         tp->dma_rwctrl |= 0x001b000f;
13115                 }
13116         }
13117
13118         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13119             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13120                 tp->dma_rwctrl &= 0xfffffff0;
13121
13122         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13123             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13124                 /* Remove this if it causes problems for some boards. */
13125                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13126
13127                 /* On 5700/5701 chips, we need to set this bit.
13128                  * Otherwise the chip will issue cacheline transactions
13129                  * to streamable DMA memory with not all the byte
13130                  * enables turned on.  This is an error on several
13131                  * RISC PCI controllers, in particular sparc64.
13132                  *
13133                  * On 5703/5704 chips, this bit has been reassigned
13134                  * a different meaning.  In particular, it is used
13135                  * on those chips to enable a PCI-X workaround.
13136                  */
13137                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13138         }
13139
13140         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13141
13142 #if 0
13143         /* Unneeded, already done by tg3_get_invariants.  */
13144         tg3_switch_clocks(tp);
13145 #endif
13146
13147         ret = 0;
13148         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13149             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13150                 goto out;
13151
13152         /* It is best to perform DMA test with maximum write burst size
13153          * to expose the 5700/5701 write DMA bug.
13154          */
13155         saved_dma_rwctrl = tp->dma_rwctrl;
13156         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13157         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13158
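        /* Test loop: fill the 8KB host buffer with a known pattern, DMA it
         * to NIC SRAM, DMA it back and verify it.  If the read-back data is
         * corrupted, retry once with the write boundary forced to 16 bytes,
         * which works around the 5700/5701 write DMA bug; if that also
         * fails, the test returns -ENODEV.
         */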
13159         while (1) {
13160                 u32 *p = buf, i;
13161
13162                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13163                         p[i] = i;
13164
13165                 /* Send the buffer to the chip. */
13166                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13167                 if (ret) {
13168                         printk(KERN_ERR "tg3_test_dma() write to device failed, err = %d\n", ret);
13169                         break;
13170                 }
13171
13172 #if 0
13173                 /* validate data reached card RAM correctly. */
13174                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13175                         u32 val;
13176                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
13177                         if (le32_to_cpu(val) != p[i]) {
13178                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
13179                                 /* ret = -ENODEV here? */
13180                         }
13181                         p[i] = 0;
13182                 }
13183 #endif
13184                 /* Now read it back. */
13185                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13186                 if (ret) {
13187                         printk(KERN_ERR "tg3_test_dma() read from device failed, err = %d\n", ret);
13189                         break;
13190                 }
13191
13192                 /* Verify it. */
13193                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13194                         if (p[i] == i)
13195                                 continue;
13196
13197                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13198                             DMA_RWCTRL_WRITE_BNDRY_16) {
13199                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13200                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13201                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13202                                 break;
13203                         } else {
13204                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13205                                 ret = -ENODEV;
13206                                 goto out;
13207                         }
13208                 }
13209
13210                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13211                         /* Success. */
13212                         ret = 0;
13213                         break;
13214                 }
13215         }
13216         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13217             DMA_RWCTRL_WRITE_BNDRY_16) {
13218                 static struct pci_device_id dma_wait_state_chipsets[] = {
13219                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13220                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13221                         { },
13222                 };
13223
13224                 /* DMA test passed without adjusting DMA boundary,
13225                  * now look for chipsets that are known to expose the
13226                  * DMA bug without failing the test.
13227                  */
13228                 if (pci_dev_present(dma_wait_state_chipsets)) {
13229                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13230                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13231                 } else {
13232                         /* Safe to use the calculated DMA boundary. */
13233                         tp->dma_rwctrl = saved_dma_rwctrl;
13234                 }
13235
13236                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13237         }
13238
13239 out:
13240         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13241 out_nofree:
13242         return ret;
13243 }
13244
13245 static void __devinit tg3_init_link_config(struct tg3 *tp)
13246 {
13247         tp->link_config.advertising =
13248                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13249                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13250                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13251                  ADVERTISED_Autoneg | ADVERTISED_MII);
13252         tp->link_config.speed = SPEED_INVALID;
13253         tp->link_config.duplex = DUPLEX_INVALID;
13254         tp->link_config.autoneg = AUTONEG_ENABLE;
13255         tp->link_config.active_speed = SPEED_INVALID;
13256         tp->link_config.active_duplex = DUPLEX_INVALID;
13257         tp->link_config.phy_is_low_power = 0;
13258         tp->link_config.orig_speed = SPEED_INVALID;
13259         tp->link_config.orig_duplex = DUPLEX_INVALID;
13260         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13261 }
13262
13263 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13264 {
13265         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13266                 tp->bufmgr_config.mbuf_read_dma_low_water =
13267                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13268                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13269                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13270                 tp->bufmgr_config.mbuf_high_water =
13271                         DEFAULT_MB_HIGH_WATER_5705;
13272                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13273                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13274                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13275                         tp->bufmgr_config.mbuf_high_water =
13276                                 DEFAULT_MB_HIGH_WATER_5906;
13277                 }
13278
13279                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13280                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13281                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13282                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13283                 tp->bufmgr_config.mbuf_high_water_jumbo =
13284                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13285         } else {
13286                 tp->bufmgr_config.mbuf_read_dma_low_water =
13287                         DEFAULT_MB_RDMA_LOW_WATER;
13288                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13289                         DEFAULT_MB_MACRX_LOW_WATER;
13290                 tp->bufmgr_config.mbuf_high_water =
13291                         DEFAULT_MB_HIGH_WATER;
13292
13293                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13294                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13295                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13296                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13297                 tp->bufmgr_config.mbuf_high_water_jumbo =
13298                         DEFAULT_MB_HIGH_WATER_JUMBO;
13299         }
13300
13301         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13302         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13303 }
13304
13305 static char * __devinit tg3_phy_string(struct tg3 *tp)
13306 {
13307         switch (tp->phy_id & PHY_ID_MASK) {
13308         case PHY_ID_BCM5400:    return "5400";
13309         case PHY_ID_BCM5401:    return "5401";
13310         case PHY_ID_BCM5411:    return "5411";
13311         case PHY_ID_BCM5701:    return "5701";
13312         case PHY_ID_BCM5703:    return "5703";
13313         case PHY_ID_BCM5704:    return "5704";
13314         case PHY_ID_BCM5705:    return "5705";
13315         case PHY_ID_BCM5750:    return "5750";
13316         case PHY_ID_BCM5752:    return "5752";
13317         case PHY_ID_BCM5714:    return "5714";
13318         case PHY_ID_BCM5780:    return "5780";
13319         case PHY_ID_BCM5755:    return "5755";
13320         case PHY_ID_BCM5787:    return "5787";
13321         case PHY_ID_BCM5784:    return "5784";
13322         case PHY_ID_BCM5756:    return "5722/5756";
13323         case PHY_ID_BCM5906:    return "5906";
13324         case PHY_ID_BCM5761:    return "5761";
13325         case PHY_ID_BCM8002:    return "8002/serdes";
13326         case 0:                 return "serdes";
13327         default:                return "unknown";
13328         }
13329 }
13330
13331 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13332 {
13333         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13334                 strcpy(str, "PCI Express");
13335                 return str;
13336         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13337                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13338
13339                 strcpy(str, "PCIX:");
13340
13341                 if ((clock_ctrl == 7) ||
13342                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13343                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13344                         strcat(str, "133MHz");
13345                 else if (clock_ctrl == 0)
13346                         strcat(str, "33MHz");
13347                 else if (clock_ctrl == 2)
13348                         strcat(str, "50MHz");
13349                 else if (clock_ctrl == 4)
13350                         strcat(str, "66MHz");
13351                 else if (clock_ctrl == 6)
13352                         strcat(str, "100MHz");
13353         } else {
13354                 strcpy(str, "PCI:");
13355                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13356                         strcat(str, "66MHz");
13357                 else
13358                         strcat(str, "33MHz");
13359         }
13360         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13361                 strcat(str, ":32-bit");
13362         else
13363                 strcat(str, ":64-bit");
13364         return str;
13365 }
13366
13367 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13368 {
13369         struct pci_dev *peer;
13370         unsigned int func, devnr = tp->pdev->devfn & ~7;
13371
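        /* Mask off the PCI function number (the low three bits of devfn)
         * and scan all eight functions in this slot for the other port of
         * a dual-port device.
         */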
13372         for (func = 0; func < 8; func++) {
13373                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13374                 if (peer && peer != tp->pdev)
13375                         break;
13376                 pci_dev_put(peer);
13377         }
13378         /* 5704 can be configured in single-port mode, set peer to
13379          * tp->pdev in that case.
13380          */
13381         if (!peer) {
13382                 peer = tp->pdev;
13383                 return peer;
13384         }
13385
13386         /*
13387          * We don't need to keep the refcount elevated; there's no way
13388          * to remove one half of this device without removing the other
13389          */
13390         pci_dev_put(peer);
13391
13392         return peer;
13393 }
13394
13395 static void __devinit tg3_init_coal(struct tg3 *tp)
13396 {
13397         struct ethtool_coalesce *ec = &tp->coal;
13398
13399         memset(ec, 0, sizeof(*ec));
13400         ec->cmd = ETHTOOL_GCOALESCE;
13401         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13402         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13403         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13404         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13405         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13406         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13407         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13408         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13409         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13410
13411         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13412                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13413                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13414                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13415                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13416                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13417         }
13418
13419         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13420                 ec->rx_coalesce_usecs_irq = 0;
13421                 ec->tx_coalesce_usecs_irq = 0;
13422                 ec->stats_block_coalesce_usecs = 0;
13423         }
13424 }
13425
13426 static const struct net_device_ops tg3_netdev_ops = {
13427         .ndo_open               = tg3_open,
13428         .ndo_stop               = tg3_close,
13429         .ndo_start_xmit         = tg3_start_xmit,
13430         .ndo_get_stats          = tg3_get_stats,
13431         .ndo_validate_addr      = eth_validate_addr,
13432         .ndo_set_multicast_list = tg3_set_rx_mode,
13433         .ndo_set_mac_address    = tg3_set_mac_addr,
13434         .ndo_do_ioctl           = tg3_ioctl,
13435         .ndo_tx_timeout         = tg3_tx_timeout,
13436         .ndo_change_mtu         = tg3_change_mtu,
13437 #if TG3_VLAN_TAG_USED
13438         .ndo_vlan_rx_register   = tg3_vlan_rx_register,
13439 #endif
13440 #ifdef CONFIG_NET_POLL_CONTROLLER
13441         .ndo_poll_controller    = tg3_poll_controller,
13442 #endif
13443 };
13444
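/* Identical to tg3_netdev_ops except for the transmit routine: chips that
 * are not in the whitelist in tg3_init_one() are given
 * tg3_start_xmit_dma_bug(), the transmit path that carries the workarounds
 * for the older chips' DMA transmit bugs.
 */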
13445 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13446         .ndo_open               = tg3_open,
13447         .ndo_stop               = tg3_close,
13448         .ndo_start_xmit         = tg3_start_xmit_dma_bug,
13449         .ndo_get_stats          = tg3_get_stats,
13450         .ndo_validate_addr      = eth_validate_addr,
13451         .ndo_set_multicast_list = tg3_set_rx_mode,
13452         .ndo_set_mac_address    = tg3_set_mac_addr,
13453         .ndo_do_ioctl           = tg3_ioctl,
13454         .ndo_tx_timeout         = tg3_tx_timeout,
13455         .ndo_change_mtu         = tg3_change_mtu,
13456 #if TG3_VLAN_TAG_USED
13457         .ndo_vlan_rx_register   = tg3_vlan_rx_register,
13458 #endif
13459 #ifdef CONFIG_NET_POLL_CONTROLLER
13460         .ndo_poll_controller    = tg3_poll_controller,
13461 #endif
13462 };
13463
13464 static int __devinit tg3_init_one(struct pci_dev *pdev,
13465                                   const struct pci_device_id *ent)
13466 {
13467         static int tg3_version_printed = 0;
13468         resource_size_t tg3reg_len;
13469         struct net_device *dev;
13470         struct tg3 *tp;
13471         int err, pm_cap;
13472         char str[40];
13473         u64 dma_mask, persist_dma_mask;
13474
13475         if (tg3_version_printed++ == 0)
13476                 printk(KERN_INFO "%s", version);
13477
13478         err = pci_enable_device(pdev);
13479         if (err) {
13480                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13481                        "aborting.\n");
13482                 return err;
13483         }
13484
13485         if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
13486                 printk(KERN_ERR PFX "Cannot find proper PCI device "
13487                        "base address, aborting.\n");
13488                 err = -ENODEV;
13489                 goto err_out_disable_pdev;
13490         }
13491
13492         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13493         if (err) {
13494                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13495                        "aborting.\n");
13496                 goto err_out_disable_pdev;
13497         }
13498
13499         pci_set_master(pdev);
13500
13501         /* Find power-management capability. */
13502         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13503         if (pm_cap == 0) {
13504                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13505                        "aborting.\n");
13506                 err = -EIO;
13507                 goto err_out_free_res;
13508         }
13509
13510         dev = alloc_etherdev(sizeof(*tp));
13511         if (!dev) {
13512                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13513                 err = -ENOMEM;
13514                 goto err_out_free_res;
13515         }
13516
13517         SET_NETDEV_DEV(dev, &pdev->dev);
13518
13519 #if TG3_VLAN_TAG_USED
13520         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13521 #endif
13522
13523         tp = netdev_priv(dev);
13524         tp->pdev = pdev;
13525         tp->dev = dev;
13526         tp->pm_cap = pm_cap;
13527         tp->rx_mode = TG3_DEF_RX_MODE;
13528         tp->tx_mode = TG3_DEF_TX_MODE;
13529
13530         if (tg3_debug > 0)
13531                 tp->msg_enable = tg3_debug;
13532         else
13533                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13534
13535         /* The word/byte swap controls here control register access byte
13536          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13537          * setting below.
13538          */
13539         tp->misc_host_ctrl =
13540                 MISC_HOST_CTRL_MASK_PCI_INT |
13541                 MISC_HOST_CTRL_WORD_SWAP |
13542                 MISC_HOST_CTRL_INDIR_ACCESS |
13543                 MISC_HOST_CTRL_PCISTATE_RW;
13544
13545         /* The NONFRM (non-frame) byte/word swap controls take effect
13546          * on descriptor entries, anything which isn't packet data.
13547          *
13548          * The StrongARM chips on the board (one for tx, one for rx)
13549          * are running in big-endian mode.
13550          */
13551         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13552                         GRC_MODE_WSWAP_NONFRM_DATA);
13553 #ifdef __BIG_ENDIAN
13554         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13555 #endif
13556         spin_lock_init(&tp->lock);
13557         spin_lock_init(&tp->indirect_lock);
13558         INIT_WORK(&tp->reset_task, tg3_reset_task);
13559
13560         dev->mem_start = pci_resource_start(pdev, BAR_0);
13561         tg3reg_len = pci_resource_len(pdev, BAR_0);
13562         dev->mem_end = dev->mem_start + tg3reg_len;
13563
13564         tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
13565         if (!tp->regs) {
13566                 printk(KERN_ERR PFX "Cannot map device registers, "
13567                        "aborting.\n");
13568                 err = -ENOMEM;
13569                 goto err_out_free_dev;
13570         }
13571
13572         tg3_init_link_config(tp);
13573
13574         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13575         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13576         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13577
13578         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13579         dev->ethtool_ops = &tg3_ethtool_ops;
13580         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13581         dev->irq = pdev->irq;
13582
13583         err = tg3_get_invariants(tp);
13584         if (err) {
13585                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13586                        "aborting.\n");
13587                 goto err_out_iounmap;
13588         }
13589
13590         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13595             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13596                 dev->netdev_ops = &tg3_netdev_ops;
13597         else
13598                 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13599
13601         /* The EPB bridge inside 5714, 5715, and 5780 and any
13602          * device behind the EPB cannot support DMA addresses > 40-bit.
13603          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13604          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13605          * do DMA address check in tg3_start_xmit().
13606          */
13607         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13608                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13609         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13610                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13611 #ifdef CONFIG_HIGHMEM
13612                 dma_mask = DMA_64BIT_MASK;
13613 #endif
13614         } else
13615                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13616
13617         /* Configure DMA attributes. */
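        /* dma_mask is applied to streaming DMA mappings (pci_set_dma_mask),
         * while persist_dma_mask is applied separately to coherent
         * allocations (pci_set_consistent_dma_mask) below.
         */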
13618         if (dma_mask > DMA_32BIT_MASK) {
13619                 err = pci_set_dma_mask(pdev, dma_mask);
13620                 if (!err) {
13621                         dev->features |= NETIF_F_HIGHDMA;
13622                         err = pci_set_consistent_dma_mask(pdev,
13623                                                           persist_dma_mask);
13624                         if (err < 0) {
13625                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13626                                        "DMA for consistent allocations\n");
13627                                 goto err_out_iounmap;
13628                         }
13629                 }
13630         }
13631         if (err || dma_mask == DMA_32BIT_MASK) {
13632                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13633                 if (err) {
13634                         printk(KERN_ERR PFX "No usable DMA configuration, "
13635                                "aborting.\n");
13636                         goto err_out_iounmap;
13637                 }
13638         }
13639
13640         tg3_init_bufmgr_config(tp);
13641
13642         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13643                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13644         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13646             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13647             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13648             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13649             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13650                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13651         } else {
13652                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13653         }
13654
13655         /* TSO is on by default on chips that support hardware TSO.
13656          * Firmware TSO on older chips gives lower performance, so it
13657          * is off by default, but can be enabled using ethtool.
13658          */
13659         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13660                 dev->features |= NETIF_F_TSO;
13661                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13662                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13663                         dev->features |= NETIF_F_TSO6;
13664                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13665                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13666                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13667                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13668                         dev->features |= NETIF_F_TSO_ECN;
13669         }
13670
13672         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13673             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13674             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13675                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13676                 tp->rx_pending = 63;
13677         }
13678
13679         err = tg3_get_device_address(tp);
13680         if (err) {
13681                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13682                        "aborting.\n");
13683                 goto err_out_iounmap;
13684         }
13685
13686         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13687                 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
13688                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13689                                "base address for APE, aborting.\n");
13690                         err = -ENODEV;
13691                         goto err_out_iounmap;
13692                 }
13693
13694                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13695                 if (!tp->aperegs) {
13696                         printk(KERN_ERR PFX "Cannot map APE registers, "
13697                                "aborting.\n");
13698                         err = -ENOMEM;
13699                         goto err_out_iounmap;
13700                 }
13701
13702                 tg3_ape_lock_init(tp);
13703         }
13704
13705         /*
13706          * Reset the chip in case a UNDI or EFI driver did not shut it
13707          * down cleanly; otherwise the DMA self test will enable WDMAC
13708          * and we'll see (spurious) pending DMA on the PCI bus.
13709          */
13710         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13711             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13712                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13713                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13714         }
13715
13716         err = tg3_test_dma(tp);
13717         if (err) {
13718                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13719                 goto err_out_apeunmap;
13720         }
13721
13722         /* Tigon3 can offload IPv4 checksums only (a few newer chips also
13723          * handle IPv6, see below), and some chips have buggy checksumming.
13724          */
13725         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13726                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13727                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13728                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13729                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13730                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13731                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13732                         dev->features |= NETIF_F_IPV6_CSUM;
13733
13734                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13735         } else
13736                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13737
13738         /* flow control autonegotiation is default behavior */
13739         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13740         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13741
13742         tg3_init_coal(tp);
13743
13744         pci_set_drvdata(pdev, dev);
13745
13746         err = register_netdev(dev);
13747         if (err) {
13748                 printk(KERN_ERR PFX "Cannot register net device, "
13749                        "aborting.\n");
13750                 goto err_out_apeunmap;
13751         }
13752
13753         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13754                dev->name,
13755                tp->board_part_number,
13756                tp->pci_chip_rev_id,
13757                tg3_bus_string(tp, str),
13758                dev->dev_addr);
13759
13760         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13761                 printk(KERN_INFO
13762                        "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13763                        tp->dev->name,
13764                        tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13765                        dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13766         else
13767                 printk(KERN_INFO
13768                        "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13769                        tp->dev->name, tg3_phy_string(tp),
13770                        ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13771                         ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13772                          "10/100/1000Base-T")),
13773                        (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13774
13775         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13776                dev->name,
13777                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13778                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13779                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13780                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13781                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13782         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13783                dev->name, tp->dma_rwctrl,
13784                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13785                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13786
13787         return 0;
13788
13789 err_out_apeunmap:
13790         if (tp->aperegs) {
13791                 iounmap(tp->aperegs);
13792                 tp->aperegs = NULL;
13793         }
13794
13795 err_out_iounmap:
13796         if (tp->regs) {
13797                 iounmap(tp->regs);
13798                 tp->regs = NULL;
13799         }
13800
13801 err_out_free_dev:
13802         free_netdev(dev);
13803
13804 err_out_free_res:
13805         pci_release_regions(pdev);
13806
13807 err_out_disable_pdev:
13808         pci_disable_device(pdev);
13809         pci_set_drvdata(pdev, NULL);
13810         return err;
13811 }
13812
13813 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13814 {
13815         struct net_device *dev = pci_get_drvdata(pdev);
13816
13817         if (dev) {
13818                 struct tg3 *tp = netdev_priv(dev);
13819
13820                 flush_scheduled_work();
13821
13822                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13823                         tg3_phy_fini(tp);
13824                         tg3_mdio_fini(tp);
13825                 }
13826
13827                 unregister_netdev(dev);
13828                 if (tp->aperegs) {
13829                         iounmap(tp->aperegs);
13830                         tp->aperegs = NULL;
13831                 }
13832                 if (tp->regs) {
13833                         iounmap(tp->regs);
13834                         tp->regs = NULL;
13835                 }
13836                 free_netdev(dev);
13837                 pci_release_regions(pdev);
13838                 pci_disable_device(pdev);
13839                 pci_set_drvdata(pdev, NULL);
13840         }
13841 }
13842
13843 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13844 {
13845         struct net_device *dev = pci_get_drvdata(pdev);
13846         struct tg3 *tp = netdev_priv(dev);
13847         pci_power_t target_state;
13848         int err;
13849
13850         /* PCI register 4 needs to be saved whether netif_running() or not.
13851          * MSI address and data need to be saved if using MSI and
13852          * netif_running().
13853          */
13854         pci_save_state(pdev);
13855
13856         if (!netif_running(dev))
13857                 return 0;
13858
13859         flush_scheduled_work();
13860         tg3_phy_stop(tp);
13861         tg3_netif_stop(tp);
13862
13863         del_timer_sync(&tp->timer);
13864
13865         tg3_full_lock(tp, 1);
13866         tg3_disable_ints(tp);
13867         tg3_full_unlock(tp);
13868
13869         netif_device_detach(dev);
13870
13871         tg3_full_lock(tp, 0);
13872         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13873         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13874         tg3_full_unlock(tp);
13875
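        /* If the device exposes a PM capability, ask the PCI core for an
         * appropriate target sleep state via pci_target_state(); otherwise
         * fall back to plain D3hot.
         */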
13876         target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13877
13878         err = tg3_set_power_state(tp, target_state);
13879         if (err) {
13880                 int err2;
13881
13882                 tg3_full_lock(tp, 0);
13883
13884                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13885                 err2 = tg3_restart_hw(tp, 1);
13886                 if (err2)
13887                         goto out;
13888
13889                 tp->timer.expires = jiffies + tp->timer_offset;
13890                 add_timer(&tp->timer);
13891
13892                 netif_device_attach(dev);
13893                 tg3_netif_start(tp);
13894
13895 out:
13896                 tg3_full_unlock(tp);
13897
13898                 if (!err2)
13899                         tg3_phy_start(tp);
13900         }
13901
13902         return err;
13903 }
13904
13905 static int tg3_resume(struct pci_dev *pdev)
13906 {
13907         struct net_device *dev = pci_get_drvdata(pdev);
13908         struct tg3 *tp = netdev_priv(dev);
13909         int err;
13910
13911         pci_restore_state(tp->pdev);
13912
13913         if (!netif_running(dev))
13914                 return 0;
13915
13916         err = tg3_set_power_state(tp, PCI_D0);
13917         if (err)
13918                 return err;
13919
13920         netif_device_attach(dev);
13921
13922         tg3_full_lock(tp, 0);
13923
13924         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13925         err = tg3_restart_hw(tp, 1);
13926         if (err)
13927                 goto out;
13928
13929         tp->timer.expires = jiffies + tp->timer_offset;
13930         add_timer(&tp->timer);
13931
13932         tg3_netif_start(tp);
13933
13934 out:
13935         tg3_full_unlock(tp);
13936
13937         if (!err)
13938                 tg3_phy_start(tp);
13939
13940         return err;
13941 }
13942
13943 static struct pci_driver tg3_driver = {
13944         .name           = DRV_MODULE_NAME,
13945         .id_table       = tg3_pci_tbl,
13946         .probe          = tg3_init_one,
13947         .remove         = __devexit_p(tg3_remove_one),
13948         .suspend        = tg3_suspend,
13949         .resume         = tg3_resume
13950 };
13951
13952 static int __init tg3_init(void)
13953 {
13954         return pci_register_driver(&tg3_driver);
13955 }
13956
13957 static void __exit tg3_cleanup(void)
13958 {
13959         pci_unregister_driver(&tg3_driver);
13960 }
13961
13962 module_init(tg3_init);
13963 module_exit(tg3_cleanup);