[TG3]: skb->dev assignment is done by netdev_alloc_skb
[safe/jmp/linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.64"
#define DRV_MODULE_RELDATE      "July 31, 2006"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap used when the tg3_debug module param is -1. */
#define TG3_DEF_MSG_ENABLE        \
	(NETIF_MSG_DRV          | \
	 NETIF_MSG_PROBE        | \
	 NETIF_MSG_LINK         | \
	 NETIF_MSG_TIMER        | \
	 NETIF_MSG_IFDOWN       | \
	 NETIF_MSG_IFUP         | \
	 NETIF_MSG_RX_ERR       | \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, for DMA allocation. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Free TX slots; the mask form relies on TG3_TX_RING_SIZE being a power of 2. */
#define TX_BUFFS_AVAIL(TP)                                              \
	((TP)->tx_pending -                                             \
	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame plus tp->rx_offset alignment slop plus 64
 * extra bytes (NOTE(review): presumably guard/headroom space — confirm
 * against the RX replenish path).
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
141
/* Banner string printed once at module load. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
234           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
236           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
242           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
244           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
246           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
248           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
249         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
250           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
251         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
252           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
253         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
254           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
255         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
256           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
257         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
258           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
259         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
260           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
261         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
262           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
264           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
265         { 0, }
266 };
267
268 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
269
/* ETHTOOL_GSTATS name table.  TG3_NUM_STATS is computed as
 * sizeof(struct tg3_ethtool_stats)/sizeof(u64), so the order of the
 * strings here must match that structure's u64 members one-for-one.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
350
/* ETHTOOL_TEST (self-test) name table, TG3_NUM_TEST entries.
 * NOTE(review): indices are assumed to line up with the tests executed
 * by the driver's self-test handler — confirm against that code.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
361
/* Write a 32-bit chip register via direct MMIO (the write is posted). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
366
367 static u32 tg3_read32(struct tg3 *tp, u32 off)
368 {
369         return (readl(tp->regs + off)); 
370 }
371
/* Write a chip register indirectly through PCI config space: aim the
 * register-base window at @off, then push @val through the data port.
 * indirect_lock keeps the window/data pair atomic against other users.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
381
/* Write a register and immediately read it back so the posted PCI write
 * is flushed to the chip before the caller continues.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
387
/* Read a chip register indirectly through the PCI config-space window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
/* Mailbox write for chips running in indirect (config-space) mode.
 * Two mailboxes have dedicated PCI config aliases and bypass the lock;
 * all others go through the indirect window at mailbox offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
429
/* Mailbox read for indirect mode; mailboxes live at offset + 0x5600 in
 * the indirect register window.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
441
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is the indirect path on
		 * these chips, so no read-back flush is needed (or safe).
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: MMIO write, optional settle delay, then a
		 * read-back to force the write out to the chip.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
466
/* Write a mailbox and flush the posted write with a read-back, except on
 * chips where mailbox writes are reordered anyway or where the ICH
 * workaround makes the read-back unsafe.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
474
/* Write a TX mailbox via MMIO.  TXD_MBOX_HWBUG chips need the value
 * written twice to latch reliably; MBOX_WRITE_REORDER chips need a
 * read-back so the mailbox write is not reordered ahead of descriptor
 * memory updates.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
484
/* Register/mailbox access shorthands.  The tp->write32 and tp->read32
 * family of method pointers is selected at probe time to match the
 * chip's access mode (direct MMIO vs. indirect config space) and errata.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
495
/* Write one word into NIC on-board SRAM at @off through the memory
 * window.  SRAM_USE_CONFIG chips must do it via PCI config space;
 * others use flushed MMIO.  The window base is parked at zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
516
/* Read one word of NIC on-board SRAM at @off into *val; mirror of
 * tg3_write_mem(), same window discipline.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
537
/* Mask the PCI interrupt in MISC_HOST_CTRL and write 1 to the interrupt
 * mailbox, which stops the chip from raising further interrupts.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
544
/* On non-tagged-status chips, force an interrupt via GRC local control if
 * the status block already has an update pending, so it is not missed.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
551
/* Re-enable chip interrupts: unmask the PCI interrupt and ack/re-arm the
 * interrupt mailbox with the last seen status tag.  1SHOT_MSI chips need
 * the mailbox written a second time.  Finally, force an interrupt if a
 * status update is already pending (tg3_cond_int).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();	/* irq_sync must be globally visible before ints come back */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
566
567 static inline unsigned int tg3_has_work(struct tg3 *tp)
568 {
569         struct tg3_hw_status *sblk = tp->hw_status;
570         unsigned int work_exists = 0;
571
572         /* check for phy events */
573         if (!(tp->tg3_flags &
574               (TG3_FLAG_USE_LINKCHG_REG |
575                TG3_FLAG_POLL_SERDES))) {
576                 if (sblk->status & SD_STATUS_LINK_CHG)
577                         work_exists = 1;
578         }
579         /* check for RX/TX work to do */
580         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
581             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
582                 work_exists = 1;
583
584         return work_exists;
585 }
586
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
607
/* Quiesce the netdev: refresh trans_start first so the stopped queue is
 * not mistaken for a TX timeout, then stop polling and TX.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
614
/* Undo tg3_netif_stop(): wake the TX queue, re-enable polling, and
 * re-arm interrupts with SD_STATUS_UPDATED set so any work that arrived
 * while stopped is picked up.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
626
/* Step the core clock source back to normal via CLOCK_CTRL.  5780-class
 * chips manage their own clocks and are left alone.  The intermediate
 * ALTCLK writes appear to be a hardware-required transition sequence
 * (NOTE(review): order taken on trust from the original code); each
 * write is flushed and followed by a 40 usec wait.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the bits we want to carry forward. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
657
/* Max polls (10 usec apart) of the MI_COM busy bit before giving up. */
#define PHY_BUSY_LOOPS  5000
659
/* Read PHY register @reg over the MDIO interface (MAC_MI_COM).
 * MAC auto-polling is suspended for the duration so it cannot race with
 * our transaction, and restored before returning.
 * Returns 0 with *val filled in, or -EBUSY if the interface stays busy.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register number, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for BUSY to clear; once clear, re-read after a short settle
	 * delay to latch the returned data bits.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
708
/* Write @val to PHY register @reg over MDIO (MAC_MI_COM), with the same
 * auto-poll suspension and busy-wait discipline as tg3_readphy().
 * Returns 0 on success, -EBUSY if the interface never went idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
753
754 static void tg3_phy_set_wirespeed(struct tg3 *tp)
755 {
756         u32 val;
757
758         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
759                 return;
760
761         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
762             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
763                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
764                              (val | (1 << 15) | (1 << 4)));
765 }
766
767 static int tg3_bmcr_reset(struct tg3 *tp)
768 {
769         u32 phy_control;
770         int limit, err;
771
772         /* OK, reset it, and poll the BMCR_RESET bit until it
773          * clears or we time out.
774          */
775         phy_control = BMCR_RESET;
776         err = tg3_writephy(tp, MII_BMCR, phy_control);
777         if (err != 0)
778                 return -EBUSY;
779
780         limit = 5000;
781         while (limit--) {
782                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
783                 if (err != 0)
784                         return -EBUSY;
785
786                 if ((phy_control & BMCR_RESET) == 0) {
787                         udelay(40);
788                         break;
789                 }
790                 udelay(10);
791         }
792         if (limit <= 0)
793                 return -EBUSY;
794
795         return 0;
796 }
797
798 static int tg3_wait_macro_done(struct tg3 *tp)
799 {
800         int limit = 100;
801
802         while (limit--) {
803                 u32 tmp32;
804
805                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
806                         if ((tmp32 & 0x1000) == 0)
807                                 break;
808                 }
809         }
810         if (limit <= 0)
811                 return -EBUSY;
812
813         return 0;
814 }
815
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is functioning.  Any macro
 * timeout sets *resetp so the caller performs a fresh PHY reset
 * before retrying; a data mismatch writes a DSP recovery sequence
 * instead and leaves *resetp unchanged.  Returns 0 if all four
 * channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six 16-bit words (held in u32) per channel.  The exact values
	 * are presumably Broadcom-specified DSP test vectors — no
	 * public documentation to confirm against.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block (0x2000 stride). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Load the six pattern words into the DSP r/w port. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick off the write macro (0x0202) and wait for it. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back two words (low/high) at a time
		 * and compare against what was written; only the low
		 * 15 bits of "low" and low 4 bits of "high" are valid.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the DSP recovery
				 * writes; caller retries without a
				 * forced PHY reset (*resetp untouched).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
881
882 static int tg3_phy_reset_chanpat(struct tg3 *tp)
883 {
884         int chan;
885
886         for (chan = 0; chan < 4; chan++) {
887                 int i;
888
889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
890                              (chan * 0x2000) | 0x0200);
891                 tg3_writephy(tp, 0x16, 0x0002);
892                 for (i = 0; i < 6; i++)
893                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
894                 tg3_writephy(tp, 0x16, 0x0202);
895                 if (tg3_wait_macro_done(tp))
896                         return -EBUSY;
897         }
898
899         return 0;
900 }
901
/* PHY reset procedure for 5703/5704/5705: reset the PHY, then write
 * and verify a DSP test pattern, retrying (up to 10 times) with a
 * fresh reset when the testpat helper requests one.  Afterwards the
 * pattern is cleared and the registers disturbed along the way are
 * restored.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if every iteration bailed out via the continues
	 * above, phy9_orig is used uninitialized further down; and if
	 * the testpat check failed on all 10 attempts, that err value is
	 * overwritten here and the failure silently dropped.  Both look
	 * like latent bugs — confirm before changing, as the restore
	 * sequence below must still run.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access (undo the 0x8005/0x0800 write). */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
977
978 static void tg3_link_report(struct tg3 *);
979
980 /* This will reset the tigon3 PHY if there is no valid
981  * link unless the FORCE argument is non-zero.
982  */
/* Reset the tigon3 PHY and apply the chip-revision specific
 * workaround sequences, then restore jumbo-frame and wirespeed
 * settings.  Returns 0 on success or a negative errno from the
 * reset helpers.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice — the link-status bit is latching, so the
	 * first read flushes a stale value (standard MII idiom).
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report link-down up front. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 require the DSP test-pattern reset procedure
	 * instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Per-bug DSP write sequences; values are Broadcom-prescribed
	 * fixups for the respective PHY erratum flags.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally — presumably required by
		 * the 5704 A0 erratum; confirm against Broadcom docs.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1068
/* Configure the GPIO-driven auxiliary power rails.  On dual-port
 * devices (5704/5714) the two functions share aux power, so the
 * peer's WoL/ASF state is consulted before the GPIOs are toched:
 * whichever port needs power keeps the rails up; otherwise they are
 * driven to the low-power configuration.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Boards with EEPROM write-protect set (used elsewhere in this
	 * file as the LOM indicator) manage aux power themselves.
	 */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Either port needing WoL or ASF keeps aux power available. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port is already initialized, it
			 * owns the GPIO setup — don't fight over it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three staged writes, 100us apart — the
			 * sequencing (OUTPUT2 before OUTPUT0, then
			 * OUTPUT2 released) appears deliberate; do not
			 * reorder.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1: drive high, release, drive high
			 * again, 100us apart.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1164
1165 static int tg3_setup_phy(struct tg3 *, int);
1166
1167 #define RESET_KIND_SHUTDOWN     0
1168 #define RESET_KIND_INIT         1
1169 #define RESET_KIND_SUSPEND      2
1170
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
1175
1176 static void tg3_power_down_phy(struct tg3 *tp)
1177 {
1178         /* The PHY should not be powered down on some chips because
1179          * of bugs.
1180          */
1181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1183             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1184              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1185                 return;
1186         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1187 }
1188
/* Transition the device into the requested PCI power state.
 *
 * D0: write the PMCSR, switch out of Vaux, and return.  D1/D2/D3hot:
 * prepare for sleep — save link config, drop copper links to
 * 10/half, publish the WoL state in shared memory, set up MAC and
 * clock registers for the chosen wake mode, optionally power off the
 * PHY, sequence aux power, and finally write the PMCSR.
 *
 * Returns 0 on success, -EINVAL for an unrecognized power state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Read-modify-write PMCSR: ack any pending PME (the status bit
	 * is write-one-to-clear) and clear the power-state field.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;	/* D0 state encoding is zero */
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask chip interrupts while asleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the active link parameters so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs: renegotiate down to 10/half for the sleep state. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, wait up to ~200ms for the firmware mailbox to
	 * present the magic value before publishing the WoL state.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC so it can receive (and wake on) packets
	 * while the host sleeps.
	 */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Magic-packet detection needs PME from D3cold support. */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock/PLL setup for sleep; the bit sets vary by chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the two bit sets in sequence, 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* No wake source and no ASF: safe to power the PHY off. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1399
1400 static void tg3_link_report(struct tg3 *tp)
1401 {
1402         if (!netif_carrier_ok(tp->dev)) {
1403                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1404         } else {
1405                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1406                        tp->dev->name,
1407                        (tp->link_config.active_speed == SPEED_1000 ?
1408                         1000 :
1409                         (tp->link_config.active_speed == SPEED_100 ?
1410                          100 : 10)),
1411                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1412                         "full" : "half"));
1413
1414                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1415                        "%s for RX.\n",
1416                        tp->dev->name,
1417                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1418                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1419         }
1420 }
1421
/* Resolve TX/RX pause from the local and remote advertisement words
 * (IEEE 802.3 Annex 28B style resolution) and program MAC_RX_MODE /
 * MAC_TX_MODE, writing each register only when its flow-control bit
 * actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution:
		 *   both symmetric                    -> TX+RX pause
		 *   local sym+asym, remote asym only  -> RX pause only
		 *   local asym only, remote sym+asym  -> TX pause only
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Autoneg of pause disabled: keep the current flags. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1493
1494 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1495 {
1496         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1497         case MII_TG3_AUX_STAT_10HALF:
1498                 *speed = SPEED_10;
1499                 *duplex = DUPLEX_HALF;
1500                 break;
1501
1502         case MII_TG3_AUX_STAT_10FULL:
1503                 *speed = SPEED_10;
1504                 *duplex = DUPLEX_FULL;
1505                 break;
1506
1507         case MII_TG3_AUX_STAT_100HALF:
1508                 *speed = SPEED_100;
1509                 *duplex = DUPLEX_HALF;
1510                 break;
1511
1512         case MII_TG3_AUX_STAT_100FULL:
1513                 *speed = SPEED_100;
1514                 *duplex = DUPLEX_FULL;
1515                 break;
1516
1517         case MII_TG3_AUX_STAT_1000HALF:
1518                 *speed = SPEED_1000;
1519                 *duplex = DUPLEX_HALF;
1520                 break;
1521
1522         case MII_TG3_AUX_STAT_1000FULL:
1523                 *speed = SPEED_1000;
1524                 *duplex = DUPLEX_FULL;
1525                 break;
1526
1527         default:
1528                 *speed = SPEED_INVALID;
1529                 *duplex = DUPLEX_INVALID;
1530                 break;
1531         };
1532 }
1533
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config, then either force the configured speed/duplex or
 * (re)start autonegotiation.
 *
 * Three configuration paths:
 *   1. phy_is_low_power set: advertise only 10baseT (plus 100baseT when
 *      WOL at 100Mb is required) to minimize power.
 *   2. speed == SPEED_INVALID: advertise every supported mode (gigabit
 *      masked off on 10/100-only boards) for full autonegotiation.
 *   3. otherwise: advertise exactly the one requested speed/duplex.
 * Finally, if autoneg is disabled and a speed was given, the BMCR is
 * forced; otherwise autonegotiation is restarted.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything,
		 * dropping the gigabit modes on 10/100-only hardware.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate the ethtool ADVERTISED_* mask into MII
		 * advertisement register bits.
		 */
		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 chips want to be gigabit master;
			 * presumably a chip-rev workaround -- TODO confirm
			 * against Broadcom errata.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10 or 100 Mb/s: no gigabit advertisement. */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback mode and wait (up to
			 * ~15ms) for it to actually go down before forcing
			 * the new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; link status latches
				 * low, so the second read is current.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg requested (or no forced speed): restart it. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1672
1673 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1674 {
1675         int err;
1676
1677         /* Turn off tap power management. */
1678         /* Set Extended packet length bit */
1679         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1680
1681         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1682         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1683
1684         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1685         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1686
1687         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1689
1690         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1691         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1692
1693         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1694         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1695
1696         udelay(40);
1697
1698         return err;
1699 }
1700
1701 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1702 {
1703         u32 adv_reg, all_mask;
1704
1705         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1706                 return 0;
1707
1708         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1709                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1710         if ((adv_reg & all_mask) != all_mask)
1711                 return 0;
1712         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1713                 u32 tg3_ctrl;
1714
1715                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1716                         return 0;
1717
1718                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1719                             MII_TG3_CTRL_ADV_1000_FULL);
1720                 if ((tg3_ctrl & all_mask) != all_mask)
1721                         return 0;
1722         }
1723         return 1;
1724 }
1725
/* Bring up / re-evaluate the copper PHY link.
 *
 * Resets the PHY if needed, applies chip-specific PHY workarounds,
 * polls link status, determines the negotiated speed/duplex and flow
 * control, programs MAC_MODE to match, and finally updates the
 * netdev carrier state (reporting any change via tg3_link_report()).
 *
 * @tp:          device state
 * @force_reset: non-zero to unconditionally reset the PHY first
 *
 * Returns 0 on success, or a negative error from the 5401 DSP init /
 * PHY reset path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear any latched status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* Double read: BMSR link status latches low, so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP fixups
			 * and wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a full PHY
			 * reset plus DSP reload if link never came back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only link-change interrupts when using MI interrupts,
	 * otherwise mask everything.
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the AUX control shadow (0x4007) is
		 * set; if we had to set it, skip straight to relinking.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 iterations) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it into
		 * speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a sane value; 0x7fff is
		 * presumably a bogus all-ones-style readback -- TODO
		 * confirm against PHY errata.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY is in
			 * exactly the requested speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisement / restart autoneg, then
		 * re-check link status.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via the
	 * mailbox after clearing latched status.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate link state to the net device and log changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2004
/* Software state for the fiber (1000BASE-X style) autonegotiation
 * state machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	/* Current state machine state (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control and result flags (MR_*): the MR_LP_ADV_* bits record
	 * what the link partner advertised in its config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters; cur_time advances once per smachine call,
	 * link_time records when the current settling period began.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times the
	 * same value was seen (ability match requires > 1).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the incoming config stream. */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Number of cur_time ticks allowed for link settling; the caller
 * (fiber_autoneg) advances one tick per ~1us polling iteration.
 */
#define ANEG_STATE_SETTLE_TIME  10000
2068
/* Run one step of the software fiber autonegotiation state machine.
 *
 * Each call samples the received config word from the MAC, updates
 * the match-tracking fields in @ap, and advances @ap->state.  The
 * caller (fiber_autoneg) invokes this repeatedly in a polling loop.
 *
 * Returns ANEG_OK (keep stepping), ANEG_TIMER_ENAB (keep stepping,
 * a settle timer is running), ANEG_DONE (negotiation finished) or
 * ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First call: zero out all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* "Ability match" means the same non-idle config word
		 * was received more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config received: link partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restart negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit a zero config word and start the settle timer. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's ability with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner changed its word: renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into
		 * the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented;
				 * only proceed if neither side used it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for idles to settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2316
2317 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2318 {
2319         int res = 0;
2320         struct tg3_fiber_aneginfo aninfo;
2321         int status = ANEG_FAILED;
2322         unsigned int tick;
2323         u32 tmp;
2324
2325         tw32_f(MAC_TX_AUTO_NEG, 0);
2326
2327         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2328         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2329         udelay(40);
2330
2331         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2332         udelay(40);
2333
2334         memset(&aninfo, 0, sizeof(aninfo));
2335         aninfo.flags |= MR_AN_ENABLE;
2336         aninfo.state = ANEG_STATE_UNKNOWN;
2337         aninfo.cur_time = 0;
2338         tick = 0;
2339         while (++tick < 195000) {
2340                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2341                 if (status == ANEG_DONE || status == ANEG_FAILED)
2342                         break;
2343
2344                 udelay(1);
2345         }
2346
2347         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2348         tw32_f(MAC_MODE, tp->mac_mode);
2349         udelay(40);
2350
2351         *flags = aninfo.flags;
2352
2353         if (status == ANEG_DONE &&
2354             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2355                              MR_LP_ADV_FULL_DUPLEX)))
2356                 res = 1;
2357
2358         return res;
2359 }
2360
/* One-time init sequence for the BCM8002 SERDES PHY: SW reset,
 * PLL/comdet programming via vendor-specific registers, then a POR
 * pulse.  The 0x10/0x11/0x13/0x16/0x18 register values below are
 * Broadcom magic -- do not change without the 8002 data sheet.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	/* i.e. skip the reset only when we are already initialized
	 * and the PCS reports no sync (link down).
	 */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2410
2411 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2412 {
2413         u32 sg_dig_ctrl, sg_dig_status;
2414         u32 serdes_cfg, expected_sg_dig_ctrl;
2415         int workaround, port_a;
2416         int current_link_up;
2417
2418         serdes_cfg = 0;
2419         expected_sg_dig_ctrl = 0;
2420         workaround = 0;
2421         port_a = 1;
2422         current_link_up = 0;
2423
2424         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2425             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2426                 workaround = 1;
2427                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2428                         port_a = 0;
2429
2430                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2431                 /* preserve bits 20-23 for voltage regulator */
2432                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2433         }
2434
2435         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2436
2437         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2438                 if (sg_dig_ctrl & (1 << 31)) {
2439                         if (workaround) {
2440                                 u32 val = serdes_cfg;
2441
2442                                 if (port_a)
2443                                         val |= 0xc010000;
2444                                 else
2445                                         val |= 0x4010000;
2446                                 tw32_f(MAC_SERDES_CFG, val);
2447                         }
2448                         tw32_f(SG_DIG_CTRL, 0x01388400);
2449                 }
2450                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2451                         tg3_setup_flow_control(tp, 0, 0);
2452                         current_link_up = 1;
2453                 }
2454                 goto out;
2455         }
2456
2457         /* Want auto-negotiation.  */
2458         expected_sg_dig_ctrl = 0x81388400;
2459
2460         /* Pause capability */
2461         expected_sg_dig_ctrl |= (1 << 11);
2462
2463         /* Asymettric pause */
2464         expected_sg_dig_ctrl |= (1 << 12);
2465
2466         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2467                 if (workaround)
2468                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2469                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2470                 udelay(5);
2471                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2472
2473                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2474         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2475                                  MAC_STATUS_SIGNAL_DET)) {
2476                 int i;
2477
2478                 /* Giver time to negotiate (~200ms) */
2479                 for (i = 0; i < 40000; i++) {
2480                         sg_dig_status = tr32(SG_DIG_STATUS);
2481                         if (sg_dig_status & (0x3))
2482                                 break;
2483                         udelay(5);
2484                 }
2485                 mac_status = tr32(MAC_STATUS);
2486
2487                 if ((sg_dig_status & (1 << 1)) &&
2488                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2489                         u32 local_adv, remote_adv;
2490
2491                         local_adv = ADVERTISE_PAUSE_CAP;
2492                         remote_adv = 0;
2493                         if (sg_dig_status & (1 << 19))
2494                                 remote_adv |= LPA_PAUSE_CAP;
2495                         if (sg_dig_status & (1 << 20))
2496                                 remote_adv |= LPA_PAUSE_ASYM;
2497
2498                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2499                         current_link_up = 1;
2500                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2501                 } else if (!(sg_dig_status & (1 << 1))) {
2502                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2503                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2504                         else {
2505                                 if (workaround) {
2506                                         u32 val = serdes_cfg;
2507
2508                                         if (port_a)
2509                                                 val |= 0xc010000;
2510                                         else
2511                                                 val |= 0x4010000;
2512
2513                                         tw32_f(MAC_SERDES_CFG, val);
2514                                 }
2515
2516                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2517                                 udelay(40);
2518
2519                                 /* Link parallel detection - link is up */
2520                                 /* only if we have PCS_SYNC and not */
2521                                 /* receiving config code words */
2522                                 mac_status = tr32(MAC_STATUS);
2523                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2524                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2525                                         tg3_setup_flow_control(tp, 0, 0);
2526                                         current_link_up = 1;
2527                                 }
2528                         }
2529                 }
2530         }
2531
2532 out:
2533         return current_link_up;
2534 }
2535
2536 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2537 {
2538         int current_link_up = 0;
2539
2540         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2541                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2542                 goto out;
2543         }
2544
2545         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2546                 u32 flags;
2547                 int i;
2548   
2549                 if (fiber_autoneg(tp, &flags)) {
2550                         u32 local_adv, remote_adv;
2551
2552                         local_adv = ADVERTISE_PAUSE_CAP;
2553                         remote_adv = 0;
2554                         if (flags & MR_LP_ADV_SYM_PAUSE)
2555                                 remote_adv |= LPA_PAUSE_CAP;
2556                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2557                                 remote_adv |= LPA_PAUSE_ASYM;
2558
2559                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2560
2561                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2562                         current_link_up = 1;
2563                 }
2564                 for (i = 0; i < 30; i++) {
2565                         udelay(20);
2566                         tw32_f(MAC_STATUS,
2567                                (MAC_STATUS_SYNC_CHANGED |
2568                                 MAC_STATUS_CFG_CHANGED));
2569                         udelay(40);
2570                         if ((tr32(MAC_STATUS) &
2571                              (MAC_STATUS_SYNC_CHANGED |
2572                               MAC_STATUS_CFG_CHANGED)) == 0)
2573                                 break;
2574                 }
2575
2576                 mac_status = tr32(MAC_STATUS);
2577                 if (current_link_up == 0 &&
2578                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2579                     !(mac_status & MAC_STATUS_RCVD_CFG))
2580                         current_link_up = 1;
2581         } else {
2582                 /* Forcing 1000FD link up. */
2583                 current_link_up = 1;
2584                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2585
2586                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2587                 udelay(40);
2588         }
2589
2590 out:
2591         return current_link_up;
2592 }
2593
/* Link setup for TBI/fiber ports (e.g. BCM8002 SERDES).  Programs the
 * MAC for TBI mode, runs either hardware (SG DIG) or software autoneg,
 * then updates carrier state, LEDs, and the cached speed/duplex.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we report only real changes. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, fully
	 * initialized -- if the MAC still reports a clean link (synced,
	 * signal detected, no pending change/config bits), just ack the
	 * change bits and keep the existing configuration.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config change events until they stop arriving. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to restart negotiation. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* A fiber link only ever runs at 1000/full when it is up. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier state; log if anything actually changed. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2707
/* Link setup for fiber ports driven through an MII-style SERDES
 * (1000Base-X registers: ADVERTISE_1000X*, LPA_*).  Handles autoneg,
 * forced mode, and the parallel-detect hand-off.  Returns the
 * accumulated PHY access error status.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending link events before probing the PHY. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: the link status bit is latched. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: take link state from the MAC, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and finish on a later pass.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: speed is fixed at 1000, only duplex is
		 * configurable here.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched link bit: read BMSR twice again. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex/pause from the common subset of
			 * local and link-partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this reads the *previous* active_duplex -- the
	 * freshly resolved current_duplex is only stored below.  The MAC
	 * may be left in a stale duplex mode for one pass; verify
	 * against later upstream tg3 versions before changing.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2874
/* Periodic parallel-detect handling for MII SERDES ports.  When autoneg
 * is enabled but the link partner does not negotiate, fall back to a
 * forced 1000FD link if signal detect is up and no config code words
 * are being received; switch back to autoneg once config words
 * reappear.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice; presumably the first read clears a
			 * latched bit -- confirm against the PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2932
2933 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2934 {
2935         int err;
2936
2937         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2938                 err = tg3_setup_fiber_phy(tp, force_reset);
2939         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2940                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2941         } else {
2942                 err = tg3_setup_copper_phy(tp, force_reset);
2943         }
2944
2945         if (tp->link_config.active_speed == SPEED_1000 &&
2946             tp->link_config.active_duplex == DUPLEX_HALF)
2947                 tw32(MAC_TX_LENGTHS,
2948                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2949                       (6 << TX_LENGTHS_IPG_SHIFT) |
2950                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2951         else
2952                 tw32(MAC_TX_LENGTHS,
2953                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2954                       (6 << TX_LENGTHS_IPG_SHIFT) |
2955                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2956
2957         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2958                 if (netif_carrier_ok(tp->dev)) {
2959                         tw32(HOSTCC_STAT_COAL_TICKS,
2960                              tp->coal.stats_block_coalesce_usecs);
2961                 } else {
2962                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2963                 }
2964         }
2965
2966         return err;
2967 }
2968
2969 /* This is called whenever we suspect that the system chipset is re-
2970  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2971  * is bogus tx completions. We try to recover by setting the
2972  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2973  * in the workqueue.
2974  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* This path is only legal when MMIO reordering has not already
	 * been flagged and the indirect-mailbox workaround is not in
	 * use; otherwise something else is broken.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Only mark recovery as pending here (under tp->lock); the
	 * actual chip reset happens later in the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
2989
2990 /* Tigon3 never reports partial packet sends.  So we do not
2991  * need special logic to handle SKBs that have not had all
2992  * of their frags sent yet, like SunGEM does.
2993  */
2994 static void tg3_tx(struct tg3 *tp)
2995 {
2996         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2997         u32 sw_idx = tp->tx_cons;
2998
2999         while (sw_idx != hw_idx) {
3000                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3001                 struct sk_buff *skb = ri->skb;
3002                 int i, tx_bug = 0;
3003
3004                 if (unlikely(skb == NULL)) {
3005                         tg3_tx_recover(tp);
3006                         return;
3007                 }
3008
3009                 pci_unmap_single(tp->pdev,
3010                                  pci_unmap_addr(ri, mapping),
3011                                  skb_headlen(skb),
3012                                  PCI_DMA_TODEVICE);
3013
3014                 ri->skb = NULL;
3015
3016                 sw_idx = NEXT_TX(sw_idx);
3017
3018                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3019                         ri = &tp->tx_buffers[sw_idx];
3020                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3021                                 tx_bug = 1;
3022
3023                         pci_unmap_page(tp->pdev,
3024                                        pci_unmap_addr(ri, mapping),
3025                                        skb_shinfo(skb)->frags[i].size,
3026                                        PCI_DMA_TODEVICE);
3027
3028                         sw_idx = NEXT_TX(sw_idx);
3029                 }
3030
3031                 dev_kfree_skb(skb);
3032
3033                 if (unlikely(tx_bug)) {
3034                         tg3_tx_recover(tp);
3035                         return;
3036                 }
3037         }
3038
3039         tp->tx_cons = sw_idx;
3040
3041         if (unlikely(netif_queue_stopped(tp->dev))) {
3042                 spin_lock(&tp->tx_lock);
3043                 if (netif_queue_stopped(tp->dev) &&
3044                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3045                         netif_wake_queue(tp->dev);
3046                 spin_unlock(&tp->tx_lock);
3047         }
3048 }
3049
3050 /* Returns size of skb allocated or < 0 on error.
3051  *
3052  * We only need to fill in the address because the other members
3053  * of the RX descriptor are invariant, see tg3_init_rings.
3054  *
3055  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3056  * posting buffers we only dirty the first cache line of the RX
3057  * descriptor (containing the address).  Whereas for the RX status
3058  * buffers the cpu only reads the last cacheline of the RX descriptor
3059  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3060  */
3061 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3062                             int src_idx, u32 dest_idx_unmasked)
3063 {
3064         struct tg3_rx_buffer_desc *desc;
3065         struct ring_info *map, *src_map;
3066         struct sk_buff *skb;
3067         dma_addr_t mapping;
3068         int skb_size, dest_idx;
3069
3070         src_map = NULL;
3071         switch (opaque_key) {
3072         case RXD_OPAQUE_RING_STD:
3073                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3074                 desc = &tp->rx_std[dest_idx];
3075                 map = &tp->rx_std_buffers[dest_idx];
3076                 if (src_idx >= 0)
3077                         src_map = &tp->rx_std_buffers[src_idx];
3078                 skb_size = tp->rx_pkt_buf_sz;
3079                 break;
3080
3081         case RXD_OPAQUE_RING_JUMBO:
3082                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083                 desc = &tp->rx_jumbo[dest_idx];
3084                 map = &tp->rx_jumbo_buffers[dest_idx];
3085                 if (src_idx >= 0)
3086                         src_map = &tp->rx_jumbo_buffers[src_idx];
3087                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3088                 break;
3089
3090         default:
3091                 return -EINVAL;
3092         };
3093
3094         /* Do not overwrite any of the map or rp information
3095          * until we are sure we can commit to a new buffer.
3096          *
3097          * Callers depend upon this behavior and assume that
3098          * we leave everything unchanged if we fail.
3099          */
3100         skb = netdev_alloc_skb(tp->dev, skb_size);
3101         if (skb == NULL)
3102                 return -ENOMEM;
3103
3104         skb_reserve(skb, tp->rx_offset);
3105
3106         mapping = pci_map_single(tp->pdev, skb->data,
3107                                  skb_size - tp->rx_offset,
3108                                  PCI_DMA_FROMDEVICE);
3109
3110         map->skb = skb;
3111         pci_unmap_addr_set(map, mapping, mapping);
3112
3113         if (src_map != NULL)
3114                 src_map->skb = NULL;
3115
3116         desc->addr_hi = ((u64)mapping >> 32);
3117         desc->addr_lo = ((u64)mapping & 0xffffffff);
3118
3119         return skb_size;
3120 }
3121
3122 /* We only need to move over in the address because the other
3123  * members of the RX descriptor are invariant.  See notes above
3124  * tg3_alloc_rx_skb for full details.
3125  */
3126 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3127                            int src_idx, u32 dest_idx_unmasked)
3128 {
3129         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3130         struct ring_info *src_map, *dest_map;
3131         int dest_idx;
3132
3133         switch (opaque_key) {
3134         case RXD_OPAQUE_RING_STD:
3135                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3136                 dest_desc = &tp->rx_std[dest_idx];
3137                 dest_map = &tp->rx_std_buffers[dest_idx];
3138                 src_desc = &tp->rx_std[src_idx];
3139                 src_map = &tp->rx_std_buffers[src_idx];
3140                 break;
3141
3142         case RXD_OPAQUE_RING_JUMBO:
3143                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3144                 dest_desc = &tp->rx_jumbo[dest_idx];
3145                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3146                 src_desc = &tp->rx_jumbo[src_idx];
3147                 src_map = &tp->rx_jumbo_buffers[src_idx];
3148                 break;
3149
3150         default:
3151                 return;
3152         };
3153
3154         dest_map->skb = src_map->skb;
3155         pci_unmap_addr_set(dest_map, mapping,
3156                            pci_unmap_addr(src_map, mapping));
3157         dest_desc->addr_hi = src_desc->addr_hi;
3158         dest_desc->addr_lo = src_desc->addr_lo;
3159
3160         src_map->skb = NULL;
3161 }
3162
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3169
3170 /* The RX ring scheme is composed of multiple rings which post fresh
3171  * buffers to the chip, and one special ring the chip uses to report
3172  * status back to the host.
3173  *
3174  * The special ring reports the status of received packets to the
3175  * host.  The chip does not write into the original descriptor the
3176  * RX buffer was obtained from.  The chip simply takes the original
3177  * descriptor as provided by the host, updates the status and length
3178  * field, then writes this into the next status ring entry.
3179  *
3180  * Each ring the host uses to post buffers to the chip is described
3181  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3182  * it is first placed into the on-chip ram.  When the packet's length
3183  * is known, it walks down the TG3_BDINFO entries to select the ring.
3184  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3185  * which is within the range of the new packet's length is chosen.
3186  *
3187  * The "separate ring for rx status" scheme may sound queer, but it makes
3188  * sense from a cache coherency perspective.  If only the host writes
3189  * to the buffer post rings, and only the chip writes to the rx status
3190  * rings, then cache lines never move beyond shared-modified state.
3191  * If both the host and chip were to write into the same ring, cache line
3192  * eviction could occur since both entities want it in an exclusive state.
3193  */
/* Service the RX return ring: process up to @budget received packets
 * and return the number passed up the stack.
 *
 * Runs from tg3_poll(); RX "locking" is provided by NAPI
 * serialization of dev->poll(), not by a spinlock (see the ring
 * scheme comment above).
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie names the producer ring (std or
		 * jumbo) this buffer was posted on, and at which index.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;	/* unknown ring: skip, post nothing */
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the existing buffer up the
			 * stack and post a freshly allocated replacement
			 * into the same ring slot.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a new skb and
			 * recycle the original DMA buffer onto the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip saw a
		 * TCP/UDP packet and its computed csum is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let too many std-ring buffers pile up unposted;
		 * kick the producer mailbox early if the cap is hit.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3349
/* NAPI poll callback: handle link/PHY events, reap completed TX
 * descriptors, receive packets within *budget, and re-enable chip
 * interrupts once no work remains.
 * Returns 0 when done (device removed from poll list), 1 otherwise.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			/* TX path found an inconsistency: leave NAPI and
			 * let the reset task restart the chip.
			 */
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Remember the tag of the status block just processed;
		 * tg3_restart_ints() acks the chip with it.
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3411
/* Stop the IRQ handlers from scheduling NAPI (they test tp->irq_sync)
 * and wait for any handler already executing to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);	/* must not already be quiesced */

	tp->irq_sync = 1;
	smp_mb();	/* make irq_sync visible before waiting on the IRQ */

	synchronize_irq(tp->pdev->irq);
}
3421
/* Nonzero while interrupts are quiesced by tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3426
3427 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3428  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3429  * with as well.  Most of the time, this is not necessary except when
3430  * shutting down the device.
3431  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Optionally quiesce the IRQ handler before taking tp->lock. */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3438
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3443
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3460
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3485
/* INTx interrupt handler (line may be shared).  Claims the interrupt
 * only when the status block was updated or INTA# is asserted.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3528
/* INTx handler for chips using tagged status: a change in
 * status_tag (rather than SD_STATUS_UPDATED) signals new work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3570
/* ISR for interrupt test: ack the interrupt and report whether it
 * was ours — no NAPI scheduling, no further processing.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3587
3588 static int tg3_init_hw(struct tg3 *, int);
3589 static int tg3_halt(struct tg3 *, int, int);
3590
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 or the tg3_init_hw() error.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: halt the chip and close the interface.
		 * dev_close() must run without tp->lock, so drop it and
		 * retake it before returning to the caller.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3612
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netpoll (netconsole etc.): invoke the INTx
 * handler directly when real interrupts are unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3621
/* Workqueue handler that halts and fully reinitializes the chip.
 * Scheduled from tg3_tx_timeout() and from the TX recovery path.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Device was closed before the work ran: nothing to reset. */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Retake the lock with IRQ quiesce so no handler races the reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery: switch to flushing mailbox write methods. */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3666
/* net_device watchdog callback: the stack saw the TX queue stall.
 * Defer the actual chip reset to process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3676
3677 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3678 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3679 {
3680         u32 base = (u32) mapping & 0xffffffff;
3681
3682         return ((base > 0xffffdcc0) &&
3683                 (base + len + 8 < base));
3684 }
3685
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only 64-bit highmem configs can hand out mappings above 40
	 * bits; check only on chips flagged with the 40-bit DMA bug.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3698
3699 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3700
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a fresh copy whose single buffer does not cross
 * a 4GB boundary, rewrite the TX descriptor at *start, and release
 * the old skb's DMA mappings and ring entries up to @last_plus_one.
 * Returns 0 on success, -1 if the packet had to be dropped
 * (allocation failure, or the copy still overflows).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  This runs even on the
	 * failure paths so the old skb's mappings are always released.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot takes ownership of the replacement
			 * skb (NULL when the packet is being dropped).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3758
3759 static void tg3_set_txd(struct tg3 *tp, int entry,
3760                         dma_addr_t mapping, int len, u32 flags,
3761                         u32 mss_and_is_end)
3762 {
3763         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3764         int is_end = (mss_and_is_end & 0x1);
3765         u32 mss = (mss_and_is_end >> 1);
3766         u32 vlan_tag = 0;
3767
3768         if (is_end)
3769                 flags |= TXD_FLAG_END;
3770         if (flags & TXD_FLAG_VLAN) {
3771                 vlan_tag = flags >> 16;
3772                 flags &= 0xffff;
3773         }
3774         vlan_tag |= (mss << TXD_MSS_SHIFT);
3775
3776         txd->addr_hi = ((u64) mapping >> 32);
3777         txd->addr_lo = ((u64) mapping & 0xffffffff);
3778         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3779         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3780 }
3781
3782 /* hard_start_xmit for devices that don't have any bugs and
3783  * support TG3_FLG2_HW_TSO_2 only.
3784  */
3785 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3786 {
3787         struct tg3 *tp = netdev_priv(dev);
3788         dma_addr_t mapping;
3789         u32 len, entry, base_flags, mss;
3790
3791         len = skb_headlen(skb);
3792
3793         /* We are running in BH disabled context with netif_tx_lock
3794          * and TX reclaim runs via tp->poll inside of a software
3795          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3796          * no IRQ context deadlocks to worry about either.  Rejoice!
3797          */
3798         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3799                 if (!netif_queue_stopped(dev)) {
3800                         netif_stop_queue(dev);
3801
3802                         /* This is a hard error, log it. */
3803                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3804                                "queue awake!\n", dev->name);
3805                 }
3806                 return NETDEV_TX_BUSY;
3807         }
3808
3809         entry = tp->tx_prod;
3810         base_flags = 0;
3811 #if TG3_TSO_SUPPORT != 0
3812         mss = 0;
3813         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3814             (mss = skb_shinfo(skb)->gso_size) != 0) {
3815                 int tcp_opt_len, ip_tcp_len;
3816
3817                 if (skb_header_cloned(skb) &&
3818                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3819                         dev_kfree_skb(skb);
3820                         goto out_unlock;
3821                 }
3822
3823                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3824                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3825                 else {
3826                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3827                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3828                                      sizeof(struct tcphdr);
3829
3830                         skb->nh.iph->check = 0;
3831                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3832                                                      tcp_opt_len);
3833                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3834                 }
3835
3836                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3837                                TXD_FLAG_CPU_POST_DMA);
3838
3839                 skb->h.th->check = 0;
3840
3841         }
3842         else if (skb->ip_summed == CHECKSUM_HW)
3843                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3844 #else
3845         mss = 0;
3846         if (skb->ip_summed == CHECKSUM_HW)
3847                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3848 #endif
3849 #if TG3_VLAN_TAG_USED
3850         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3851                 base_flags |= (TXD_FLAG_VLAN |
3852                                (vlan_tx_tag_get(skb) << 16));
3853 #endif
3854
3855         /* Queue skb data, a.k.a. the main skb fragment. */
3856         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3857
3858         tp->tx_buffers[entry].skb = skb;
3859         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3860
3861         tg3_set_txd(tp, entry, mapping, len, base_flags,
3862                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3863
3864         entry = NEXT_TX(entry);
3865
3866         /* Now loop through additional data fragments, and queue them. */
3867         if (skb_shinfo(skb)->nr_frags > 0) {
3868                 unsigned int i, last;
3869
3870                 last = skb_shinfo(skb)->nr_frags - 1;
3871                 for (i = 0; i <= last; i++) {
3872                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3873
3874                         len = frag->size;
3875                         mapping = pci_map_page(tp->pdev,
3876                                                frag->page,
3877                                                frag->page_offset,
3878                                                len, PCI_DMA_TODEVICE);
3879
3880                         tp->tx_buffers[entry].skb = NULL;
3881                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3882
3883                         tg3_set_txd(tp, entry, mapping, len,
3884                                     base_flags, (i == last) | (mss << 1));
3885
3886                         entry = NEXT_TX(entry);
3887                 }
3888         }
3889
3890         /* Packets are ready, update Tx producer idx local and on card. */
3891         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3892
3893         tp->tx_prod = entry;
3894         if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3895                 spin_lock(&tp->tx_lock);
3896                 netif_stop_queue(dev);
3897                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3898                         netif_wake_queue(tp->dev);
3899                 spin_unlock(&tp->tx_lock);
3900         }
3901
3902 out_unlock:
3903         mmiowb();
3904
3905         dev->trans_start = jiffies;
3906
3907         return NETDEV_TX_OK;
3908 }
3909
3910 #if TG3_TSO_SUPPORT != 0
3911 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3912
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The frame is segmented in software and each resulting MTU-sized skb
 * is sent through tg3_start_xmit_dma_bug().  The original skb is always
 * freed before returning NETDEV_TX_OK; when the ring-space estimate
 * fails, the queue is stopped and NETDEV_TX_BUSY is returned with the
 * skb untouched.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                return NETDEV_TX_BUSY;
        }

        /* Segment in software; mask off NETIF_F_TSO so the stack emits
         * fully-formed frames instead of another TSO super-frame.
         */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (unlikely(IS_ERR(segs)))
                goto tg3_tso_bug_end;

        /* Unlink each segment from the list and transmit it. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
3942 #endif
3943
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Unlike the plain transmit path, every DMA mapping here is checked
 * against the chip's addressing errata (tg3_4g_overflow_test /
 * tg3_40bit_overflow_test) and the packet is re-queued through
 * tigon3_dma_hwbug_workaround() when a descriptor would trigger them.
 * Returns NETDEV_TX_OK (skb consumed or dropped) or NETDEV_TX_BUSY.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_HW)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* A non-zero gso_size on an oversized frame means the stack is
         * asking for hardware/firmware segmentation.
         */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are rewritten below, so take a private copy
                 * if they are shared with a clone.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

                /* Chips with the HW_TSO_1 bug cannot handle TSO headers
                 * longer than 80 bytes; fall back to software GSO.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Pre-cook IP header fields for the chip's segmenter. */
                skb->nh.iph->check = 0;
                skb->nh.iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        skb->h.th->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                }
                else {
                        /* Firmware TSO expects the TCP pseudo-header
                         * checksum to be pre-seeded.
                         */
                        skb->h.th->check =
                                ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                   skb->nh.iph->daddr,
                                                   0, IPPROTO_TCP, 0);
                }

                /* Encode IP/TCP option lengths; the field position
                 * differs between HW-TSO/5705 parts and the rest.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                base_flags |= tsflags << 12;
                        }
                }
        }
#else
        mss = 0;
#endif
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        /* Check the linear data mapping against the 4GB-crossing
         * erratum before committing it to a descriptor.
         */
        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the first slot of a packet keeps the skb
                         * pointer; see tg3_free_rings().
                         */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        /* Some parts cannot DMA above 40-bit addresses. */
                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to this packet's first descriptor and let the
                 * workaround rebuild the affected mappings.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* Running low on descriptors: stop the queue, but
                 * re-wake it if the reclaim path freed enough slots in
                 * the meantime.
                 */
                spin_lock(&tp->tx_lock);
                netif_stop_queue(dev);
                if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
                        netif_wake_queue(tp->dev);
                spin_unlock(&tp->tx_lock);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4126
4127 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4128                                int new_mtu)
4129 {
4130         dev->mtu = new_mtu;
4131
4132         if (new_mtu > ETH_DATA_LEN) {
4133                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4134                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4135                         ethtool_op_set_tso(dev, 0);
4136                 }
4137                 else
4138                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4139         } else {
4140                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4141                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4142                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4143         }
4144 }
4145
/* net_device MTU-change handler.
 *
 * Validates the requested size, then — if the interface is up —
 * quiesces the chip, applies the new MTU via tg3_set_mtu() and
 * restarts the hardware so the new ring configuration takes effect.
 * Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        /* Halt the chip before touching the ring configuration. */
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* Only restart the net interface if the hardware came back. */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4179
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same walk with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: a packet occupies one slot for its linear data plus
         * one per page fragment, and only the first slot holds the skb
         * pointer (the transmit path sets the others to NULL), so walk
         * the fragment slots before freeing the skb itself.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4251
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success.  If RX buffer allocation falls short, the
 * pending counts are shrunk to what was actually allocated; -ENOMEM
 * is returned only when not even one buffer could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips use jumbo-sized buffers on the standard
         * ring when running with a jumbo MTU (they do not enable the
         * separate jumbo ring — see tg3_set_mtu()).
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        /* Shrink the ring rather than failing outright,
                         * unless nothing at all was allocated.
                         */
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Release the std ring buffers
                                         * allocated above.
                                         */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4341
4342 /*
4343  * Must not be invoked with interrupt sources disabled and
4344  * the hardware shutdown down.
4345  */
4346 static void tg3_free_consistent(struct tg3 *tp)
4347 {
4348         kfree(tp->rx_std_buffers);
4349         tp->rx_std_buffers = NULL;
4350         if (tp->rx_std) {
4351                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4352                                     tp->rx_std, tp->rx_std_mapping);
4353                 tp->rx_std = NULL;
4354         }
4355         if (tp->rx_jumbo) {
4356                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4357                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4358                 tp->rx_jumbo = NULL;
4359         }
4360         if (tp->rx_rcb) {
4361                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4362                                     tp->rx_rcb, tp->rx_rcb_mapping);
4363                 tp->rx_rcb = NULL;
4364         }
4365         if (tp->tx_ring) {
4366                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4367                         tp->tx_ring, tp->tx_desc_mapping);
4368                 tp->tx_ring = NULL;
4369         }
4370         if (tp->hw_status) {
4371                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4372                                     tp->hw_status, tp->status_mapping);
4373                 tp->hw_status = NULL;
4374         }
4375         if (tp->hw_stats) {
4376                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4377                                     tp->hw_stats, tp->stats_mapping);
4378                 tp->hw_stats = NULL;
4379         }
4380 }
4381
4382 /*
4383  * Must not be invoked with interrupt sources disabled and
4384  * the hardware shutdown down.  Can sleep.
4385  */
4386 static int tg3_alloc_consistent(struct tg3 *tp)
4387 {
4388         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4389                                       (TG3_RX_RING_SIZE +
4390                                        TG3_RX_JUMBO_RING_SIZE)) +
4391                                      (sizeof(struct tx_ring_info) *
4392                                       TG3_TX_RING_SIZE),
4393                                      GFP_KERNEL);
4394         if (!tp->rx_std_buffers)
4395                 return -ENOMEM;
4396
4397         memset(tp->rx_std_buffers, 0,
4398                (sizeof(struct ring_info) *
4399                 (TG3_RX_RING_SIZE +
4400                  TG3_RX_JUMBO_RING_SIZE)) +
4401                (sizeof(struct tx_ring_info) *
4402                 TG3_TX_RING_SIZE));
4403
4404         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4405         tp->tx_buffers = (struct tx_ring_info *)
4406                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4407
4408         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4409                                           &tp->rx_std_mapping);
4410         if (!tp->rx_std)
4411                 goto err_out;
4412
4413         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4414                                             &tp->rx_jumbo_mapping);
4415
4416         if (!tp->rx_jumbo)
4417                 goto err_out;
4418
4419         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4420                                           &tp->rx_rcb_mapping);
4421         if (!tp->rx_rcb)
4422                 goto err_out;
4423
4424         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4425                                            &tp->tx_desc_mapping);
4426         if (!tp->tx_ring)
4427                 goto err_out;
4428
4429         tp->hw_status = pci_alloc_consistent(tp->pdev,
4430                                              TG3_HW_STATUS_SIZE,
4431                                              &tp->status_mapping);
4432         if (!tp->hw_status)
4433                 goto err_out;
4434
4435         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4436                                             sizeof(struct tg3_hw_stats),
4437                                             &tp->stats_mapping);
4438         if (!tp->hw_stats)
4439                 goto err_out;
4440
4441         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4442         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4443
4444         return 0;
4445
4446 err_out:
4447         tg3_free_consistent(tp);
4448         return -ENOMEM;
4449 }
4450
4451 #define MAX_WAIT_CNT 1000
4452
4453 /* To stop a block, clear the enable bit and poll till it
4454  * clears.  tp->lock is held.
4455  */
4456 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4457 {
4458         unsigned int i;
4459         u32 val;
4460
4461         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4462                 switch (ofs) {
4463                 case RCVLSC_MODE:
4464                 case DMAC_MODE:
4465                 case MBFREE_MODE:
4466                 case BUFMGR_MODE:
4467                 case MEMARB_MODE:
4468                         /* We can't enable/disable these bits of the
4469                          * 5705/5750, just say success.
4470                          */
4471                         return 0;
4472
4473                 default:
4474                         break;
4475                 };
4476         }
4477
4478         val = tr32(ofs);
4479         val &= ~enable_bit;
4480         tw32_f(ofs, val);
4481
4482         for (i = 0; i < MAX_WAIT_CNT; i++) {
4483                 udelay(100);
4484                 val = tr32(ofs);
4485                 if ((val & enable_bit) == 0)
4486                         break;
4487         }
4488
4489         if (i == MAX_WAIT_CNT && !silent) {
4490                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4491                        "ofs=%lx enable_bit=%x\n",
4492                        ofs, enable_bit);
4493                 return -ENODEV;
4494         }
4495
4496         return 0;
4497 }
4498
/* tp->lock is held.
 *
 * Stop every receive, send and DMA engine on the chip, in order, then
 * clear the status and statistics blocks.  Errors from the individual
 * tg3_stop_block() calls are OR-ed together, so the return value only
 * distinguishes success (0) from "something failed to stop".
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Shut off the MAC receiver first so no new work arrives. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Receive-path blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Send-path and DMA blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* MAC transmitter: polled directly rather than via
         * tg3_stop_block().
         */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe the status/stats blocks so stale data is not reused. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4561
4562 /* tp->lock is held. */
4563 static int tg3_nvram_lock(struct tg3 *tp)
4564 {
4565         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4566                 int i;
4567
4568                 if (tp->nvram_lock_cnt == 0) {
4569                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4570                         for (i = 0; i < 8000; i++) {
4571                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4572                                         break;
4573                                 udelay(20);
4574                         }
4575                         if (i == 8000) {
4576                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4577                                 return -ENODEV;
4578                         }
4579                 }
4580                 tp->nvram_lock_cnt++;
4581         }
4582         return 0;
4583 }
4584
4585 /* tp->lock is held. */
4586 static void tg3_nvram_unlock(struct tg3 *tp)
4587 {
4588         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4589                 if (tp->nvram_lock_cnt > 0)
4590                         tp->nvram_lock_cnt--;
4591                 if (tp->nvram_lock_cnt == 0)
4592                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4593         }
4594 }
4595
4596 /* tp->lock is held. */
4597 static void tg3_enable_nvram_access(struct tg3 *tp)
4598 {
4599         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4600             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4601                 u32 nvaccess = tr32(NVRAM_ACCESS);
4602
4603                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4604         }
4605 }
4606
4607 /* tp->lock is held. */
4608 static void tg3_disable_nvram_access(struct tg3 *tp)
4609 {
4610         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4611             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4612                 u32 nvaccess = tr32(NVRAM_ACCESS);
4613
4614                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4615         }
4616 }
4617
4618 /* tp->lock is held. */
4619 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4620 {
4621         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4622                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4623
4624         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4625                 switch (kind) {
4626                 case RESET_KIND_INIT:
4627                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4628                                       DRV_STATE_START);
4629                         break;
4630
4631                 case RESET_KIND_SHUTDOWN:
4632                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4633                                       DRV_STATE_UNLOAD);
4634                         break;
4635
4636                 case RESET_KIND_SUSPEND:
4637                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4638                                       DRV_STATE_SUSPEND);
4639                         break;
4640
4641                 default:
4642                         break;
4643                 };
4644         }
4645 }
4646
4647 /* tp->lock is held. */
4648 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4649 {
4650         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4651                 switch (kind) {
4652                 case RESET_KIND_INIT:
4653                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4654                                       DRV_STATE_START_DONE);
4655                         break;
4656
4657                 case RESET_KIND_SHUTDOWN:
4658                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4659                                       DRV_STATE_UNLOAD_DONE);
4660                         break;
4661
4662                 default:
4663                         break;
4664                 };
4665         }
4666 }
4667
4668 /* tp->lock is held. */
4669 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4670 {
4671         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4672                 switch (kind) {
4673                 case RESET_KIND_INIT:
4674                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4675                                       DRV_STATE_START);
4676                         break;
4677
4678                 case RESET_KIND_SHUTDOWN:
4679                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4680                                       DRV_STATE_UNLOAD);
4681                         break;
4682
4683                 case RESET_KIND_SUSPEND:
4684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4685                                       DRV_STATE_SUSPEND);
4686                         break;
4687
4688                 default:
4689                         break;
4690                 };
4691         }
4692 }
4693
4694 static void tg3_stop_fw(struct tg3 *);
4695
/* tp->lock is held. */
/* Perform a full core-clock reset of the chip and bring it back to a
 * state where registers and config space are usable again: restore
 * indirect access, PCI state, MSI enables, memory arbiter and MAC
 * mode, wait for bootcode, and re-probe the ASF configuration.
 * Returns 0 (the only exit path).  The exact ordering and delays in
 * here are load-bearing — do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* NOTE(review): presumably zeroing the fastboot PC forces the
	 * bootcode to run from ROM after reset on these parts —
	 * confirm against Broadcom programming docs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* Undocumented PCIE register/bit workarounds carried
		 * over from the vendor driver.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	/* 5705 A0 chip-specific workaround (undocumented register). */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode appropriate for the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmare.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	/* PCIE post-reset workaround (undocumented register), not
	 * needed on 5750 A0.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4906
4907 /* tp->lock is held. */
4908 static void tg3_stop_fw(struct tg3 *tp)
4909 {
4910         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4911                 u32 val;
4912                 int i;
4913
4914                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4915                 val = tr32(GRC_RX_CPU_EVENT);
4916                 val |= (1 << 14);
4917                 tw32(GRC_RX_CPU_EVENT, val);
4918
4919                 /* Wait for RX cpu to ACK the event.  */
4920                 for (i = 0; i < 100; i++) {
4921                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4922                                 break;
4923                         udelay(1);
4924                 }
4925         }
4926 }
4927
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	/* Quiesce firmware, announce the reset, stop the hardware,
	 * reset the chip, then publish the post-reset state.
	 */
	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4948
4949 #define TG3_FW_RELEASE_MAJOR    0x0
4950 #define TG3_FW_RELASE_MINOR     0x0
4951 #define TG3_FW_RELEASE_FIX      0x0
4952 #define TG3_FW_START_ADDR       0x08000000
4953 #define TG3_FW_TEXT_ADDR        0x08000000
4954 #define TG3_FW_TEXT_LEN         0x9c0
4955 #define TG3_FW_RODATA_ADDR      0x080009c0
4956 #define TG3_FW_RODATA_LEN       0x60
4957 #define TG3_FW_DATA_ADDR        0x08000a40
4958 #define TG3_FW_DATA_LEN         0x20
4959 #define TG3_FW_SBSS_ADDR        0x08000a60
4960 #define TG3_FW_SBSS_LEN         0xc
4961 #define TG3_FW_BSS_ADDR         0x08000a70
4962 #define TG3_FW_BSS_LEN          0x10
4963
/* MIPS firmware .text image loaded into the RX/TX CPU scratch memory
 * by tg3_load_5701_a0_firmware_fix() (5701 A0 workaround firmware).
 * Opaque instruction words derived from Broadcom's unpublished source
 * (see the copyright header) — do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5057
/* Firmware .rodata image (mostly packed ASCII string constants used
 * by the firmware itself); loaded alongside tg3FwText.  Do not edit.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5065
5066 #if 0 /* All zeros, don't eat up space with it. */
5067 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5068         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5069         0x00000000, 0x00000000, 0x00000000, 0x00000000
5070 };
5071 #endif
5072
5073 #define RX_CPU_SCRATCH_BASE     0x30000
5074 #define RX_CPU_SCRATCH_SIZE     0x04000
5075 #define TX_CPU_SCRATCH_BASE     0x34000
5076 #define TX_CPU_SCRATCH_SIZE     0x04000
5077
5078 /* tp->lock is held. */
5079 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5080 {
5081         int i;
5082
5083         BUG_ON(offset == TX_CPU_BASE &&
5084             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5085
5086         if (offset == RX_CPU_BASE) {
5087                 for (i = 0; i < 10000; i++) {
5088                         tw32(offset + CPU_STATE, 0xffffffff);
5089                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5090                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5091                                 break;
5092                 }
5093
5094                 tw32(offset + CPU_STATE, 0xffffffff);
5095                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5096                 udelay(10);
5097         } else {
5098                 for (i = 0; i < 10000; i++) {
5099                         tw32(offset + CPU_STATE, 0xffffffff);
5100                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5101                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5102                                 break;
5103                 }
5104         }
5105
5106         if (i >= 10000) {
5107                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5108                        "and %s CPU\n",
5109                        tp->dev->name,
5110                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5111                 return -ENODEV;
5112         }
5113
5114         /* Clear firmware's nvram arbitration. */
5115         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5116                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5117         return 0;
5118 }
5119
/* Describes one firmware image as three sections (text, rodata,
 * data), each with its link-time base address, byte length, and
 * contents.  A NULL contents pointer means the section is zero-filled
 * when loaded (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* instruction words, or NULL */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* read-only words, or NULL */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* initialized data, or NULL */
};
5131
5132 /* tp->lock is held. */
5133 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5134                                  int cpu_scratch_size, struct fw_info *info)
5135 {
5136         int err, lock_err, i;
5137         void (*write_op)(struct tg3 *, u32, u32);
5138
5139         if (cpu_base == TX_CPU_BASE &&
5140             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5141                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5142                        "TX cpu firmware on %s which is 5705.\n",
5143                        tp->dev->name);
5144                 return -EINVAL;
5145         }
5146
5147         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5148                 write_op = tg3_write_mem;
5149         else
5150                 write_op = tg3_write_indirect_reg32;
5151
5152         /* It is possible that bootcode is still loading at this point.
5153          * Get the nvram lock first before halting the cpu.
5154          */
5155         lock_err = tg3_nvram_lock(tp);
5156         err = tg3_halt_cpu(tp, cpu_base);
5157         if (!lock_err)
5158                 tg3_nvram_unlock(tp);
5159         if (err)
5160                 goto out;
5161
5162         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5163                 write_op(tp, cpu_scratch_base + i, 0);
5164         tw32(cpu_base + CPU_STATE, 0xffffffff);
5165         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5166         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5167                 write_op(tp, (cpu_scratch_base +
5168                               (info->text_base & 0xffff) +
5169                               (i * sizeof(u32))),
5170                          (info->text_data ?
5171                           info->text_data[i] : 0));
5172         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5173                 write_op(tp, (cpu_scratch_base +
5174                               (info->rodata_base & 0xffff) +
5175                               (i * sizeof(u32))),
5176                          (info->rodata_data ?
5177                           info->rodata_data[i] : 0));
5178         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5179                 write_op(tp, (cpu_scratch_base +
5180                               (info->data_base & 0xffff) +
5181                               (i * sizeof(u32))),
5182                          (info->data_data ?
5183                           info->data_data[i] : 0));
5184
5185         err = 0;
5186
5187 out:
5188         return err;
5189 }
5190
5191 /* tp->lock is held. */
5192 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5193 {
5194         struct fw_info info;
5195         int err, i;
5196
5197         info.text_base = TG3_FW_TEXT_ADDR;
5198         info.text_len = TG3_FW_TEXT_LEN;
5199         info.text_data = &tg3FwText[0];
5200         info.rodata_base = TG3_FW_RODATA_ADDR;
5201         info.rodata_len = TG3_FW_RODATA_LEN;
5202         info.rodata_data = &tg3FwRodata[0];
5203         info.data_base = TG3_FW_DATA_ADDR;
5204         info.data_len = TG3_FW_DATA_LEN;
5205         info.data_data = NULL;
5206
5207         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5208                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5209                                     &info);
5210         if (err)
5211                 return err;
5212
5213         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5214                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5215                                     &info);
5216         if (err)
5217                 return err;
5218
5219         /* Now startup only the RX cpu. */
5220         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5221         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5222
5223         for (i = 0; i < 5; i++) {
5224                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5225                         break;
5226                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5227                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5228                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5229                 udelay(1000);
5230         }
5231         if (i >= 5) {
5232                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5233                        "to set RX CPU PC, is %08x should be %08x\n",
5234                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5235                        TG3_FW_TEXT_ADDR);
5236                 return -ENODEV;
5237         }
5238         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5239         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5240
5241         return 0;
5242 }
5243
5244 #if TG3_TSO_SUPPORT != 0
5245
5246 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5247 #define TG3_TSO_FW_RELASE_MINOR         0x6
5248 #define TG3_TSO_FW_RELEASE_FIX          0x0
5249 #define TG3_TSO_FW_START_ADDR           0x08000000
5250 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5251 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5252 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5253 #define TG3_TSO_FW_RODATA_LEN           0x60
5254 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5255 #define TG3_TSO_FW_DATA_LEN             0x30
5256 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5257 #define TG3_TSO_FW_SBSS_LEN             0x2c
5258 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5259 #define TG3_TSO_FW_BSS_LEN              0x894
5260
5261 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5262         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5263         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5264         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5265         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5266         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5267         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5268         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5269         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5270         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5271         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5272         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5273         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5274         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5275         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5276         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5277         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5278         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5279         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5280         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5281         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5282         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5283         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5284         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5285         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5286         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5287         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5288         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5289         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5290         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5291         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5292         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5293         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5294         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5295         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5296         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5297         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5298         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5299         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5300         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5301         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5302         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5303         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5304         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5305         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5306         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5307         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5308         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5309         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5310         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5311         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5312         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5313         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5314         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5315         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5316         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5317         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5318         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5319         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5320         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5321         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5322         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5323         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5324         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5325         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5326         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5327         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5328         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5329         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5330         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5331         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5332         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5333         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5334         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5335         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5336         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5337         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5338         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5339         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5340         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5341         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5342         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5343         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5344         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5345         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5346         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5347         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5348         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5349         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5350         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5351         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5352         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5353         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5354         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5355         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5356         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5357         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5358         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5359         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5360         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5361         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5362         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5363         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5364         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5365         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5366         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5367         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5368         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5369         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5370         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5371         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5372         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5373         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5374         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5375         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5376         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5377         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5378         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5379         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5380         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5381         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5382         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5383         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5384         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5385         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5386         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5387         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5388         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5389         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5390         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5391         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5392         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5393         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5394         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5395         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5396         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5397         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5398         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5399         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5400         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5401         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5402         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5403         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5404         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5405         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5406         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5407         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5408         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5409         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5410         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5411         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5412         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5413         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5414         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5415         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5416         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5417         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5418         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5419         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5420         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5421         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5422         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5423         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5424         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5425         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5426         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5427         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5428         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5429         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5430         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5431         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5432         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5433         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5434         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5435         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5436         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5437         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5438         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5439         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5440         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5441         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5442         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5443         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5444         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5445         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5446         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5447         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5448         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5449         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5450         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5451         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5452         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5453         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5454         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5455         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5456         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5457         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5458         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5459         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5460         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5461         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5462         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5463         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5464         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5465         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5466         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5467         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5468         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5469         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5470         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5471         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5472         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5473         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5474         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5475         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5476         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5477         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5478         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5479         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5480         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5481         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5482         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5483         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5484         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5485         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5486         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5487         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5488         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5489         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5490         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5491         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5492         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5493         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5494         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5495         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5496         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5497         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5498         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5499         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5500         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5501         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5502         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5503         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5504         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5505         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5506         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5507         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5508         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5509         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5510         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5511         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5512         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5513         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5514         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5515         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5516         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5517         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5518         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5519         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5520         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5521         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5522         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5523         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5524         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5525         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5526         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5527         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5528         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5529         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5530         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5531         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5532         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5533         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5534         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5535         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5536         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5537         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5538         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5539         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5540         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5541         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5542         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5543         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5544         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5545         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5546 };
5547
/*
 * Read-only data segment of the TSO firmware image.  The words appear
 * to encode ASCII tags used by the firmware (e.g. 0x4d61696e = "Main",
 * 0x43707542 = "CpuB", 0x66617461 0x6c457272 = "fatalErr") --
 * NOTE(review): decoded from the hex values; the firmware's own use of
 * these strings is not verifiable from this file.  Do not modify: the
 * contents must match the firmware text that references them.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5555
/*
 * Initialized data segment of the TSO firmware image.  The non-zero
 * words appear to spell an ASCII version tag (0x73746b6f 0x66666c64
 * 0x5f76312e 0x362e3000 = "stkoffld_v1.6.0") -- NOTE(review): decoded
 * from the hex values; semantics not verifiable from this file.  Do
 * not modify: the contents are part of the firmware image.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5561
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
/*
 * Historical typo ("RELASE") kept so any existing references elsewhere
 * in this file keep compiling; the correctly spelled alias below should
 * be used by new code.
 */
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_MINOR	TG3_TSO5_FW_RELASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX		0x0
/*
 * Section layout of the 5705 TSO firmware image -- presumably load
 * addresses/lengths in NIC-local memory (TODO confirm against the
 * firmware loader).  Note rodata ends at 0x10ee0 and data starts at
 * 0x10f00, so the sections are not strictly contiguous.
 */
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5577
5578 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5579         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5580         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5581         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5582         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5583         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5584         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5585         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5586         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5587         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5588         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5589         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5590         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5591         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5592         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5593         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5594         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5595         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5596         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5597         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5598         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5599         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5600         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5601         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5602         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5603         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5604         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5605         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5606         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5607         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5608         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5609         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5610         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5611         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5612         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5613         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5614         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5615         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5616         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5617         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5618         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5619         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5620         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5621         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5622         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5623         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5624         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5625         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5626         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5627         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5628         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5629         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5630         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5631         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5632         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5633         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5634         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5635         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5636         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5637         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5638         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5639         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5640         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5641         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5642         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5643         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5644         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5645         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5646         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5647         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5648         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5649         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5650         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5651         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5652         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5653         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5654         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5655         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5656         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5657         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5658         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5659         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5660         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5661         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5662         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5663         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5664         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5665         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5666         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5667         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5668         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5669         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5670         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5671         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5672         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5673         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5674         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5675         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5676         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5677         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5678         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5679         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5680         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5681         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5682         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5683         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5684         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5685         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5686         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5687         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5688         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5689         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5690         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5691         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5692         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5693         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5694         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5695         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5696         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5697         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5698         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5699         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5700         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5701         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5702         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5703         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5704         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5705         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5706         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5707         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5708         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5709         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5710         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5711         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5712         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5713         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5714         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5715         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5716         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5717         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5718         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5719         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5720         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5721         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5722         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5723         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5724         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5725         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5726         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5727         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5728         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5729         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5730         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5731         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5732         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5733         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5734         0x00000000, 0x00000000, 0x00000000,
5735 };
5736
/* Read-only data segment of the 5705 TSO firmware image.  The words
 * are big-endian ASCII tag strings used by the firmware itself
 * ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").  Copied to
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  The "+ 1"
 * pads the array so a partial trailing word is still covered.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
5743
/* Initialized data segment of the 5705 TSO firmware; contains the
 * firmware version string ("stkoffld_v1.2.0" in big-endian ASCII).
 * Copied to TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
5748
/* tp->lock is held. */
/* Download the TSO offload firmware into the appropriate on-chip CPU
 * and start it running.
 *
 * On 5705-class silicon the image is loaded through the RX CPU with
 * scratch space carved out of the MBUF pool; on other chips it goes
 * into the TX CPU scratch area.  Chips with hardware TSO
 * (TG3_FLG2_HW_TSO) need no firmware at all.
 *
 * Returns 0 on success, or a negative errno if the firmware could not
 * be loaded or the CPU refused to take the new program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Hardware TSO: nothing to download. */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* 5705: use the RX CPU; scratch space is the front of
                 * the MBUF pool, sized to cover all firmware segments.
                 */
                info.text_base = TG3_TSO5_FW_TEXT_ADDR;
                info.text_len = TG3_TSO5_FW_TEXT_LEN;
                info.text_data = &tg3Tso5FwText[0];
                info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
                info.rodata_data = &tg3Tso5FwRodata[0];
                info.data_base = TG3_TSO5_FW_DATA_ADDR;
                info.data_len = TG3_TSO5_FW_DATA_LEN;
                info.data_data = &tg3Tso5FwData[0];
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
                cpu_scratch_size = (info.text_len +
                                    info.rodata_len +
                                    info.data_len +
                                    TG3_TSO5_FW_SBSS_LEN +
                                    TG3_TSO5_FW_BSS_LEN);
        } else {
                /* Everything else: use the TX CPU and its dedicated
                 * scratch region.
                 */
                info.text_base = TG3_TSO_FW_TEXT_ADDR;
                info.text_len = TG3_TSO_FW_TEXT_LEN;
                info.text_data = &tg3TsoFwText[0];
                info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO_FW_RODATA_LEN;
                info.rodata_data = &tg3TsoFwRodata[0];
                info.data_base = TG3_TSO_FW_DATA_ADDR;
                info.data_len = TG3_TSO_FW_DATA_LEN;
                info.data_data = &tg3TsoFwData[0];
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC,    info.text_base);

        /* Verify the program counter took effect; retry up to five
         * times, halting the CPU and re-writing the PC each attempt.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.text_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC,    info.text_base);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
                       "to set CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(cpu_base + CPU_PC),
                       info.text_base);
                return -ENODEV;
        }
        /* Clear pending state and release the CPU from halt. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
5820
5821 #endif /* TG3_TSO_SUPPORT != 0 */
5822
5823 /* tp->lock is held. */
5824 static void __tg3_set_mac_addr(struct tg3 *tp)
5825 {
5826         u32 addr_high, addr_low;
5827         int i;
5828
5829         addr_high = ((tp->dev->dev_addr[0] << 8) |
5830                      tp->dev->dev_addr[1]);
5831         addr_low = ((tp->dev->dev_addr[2] << 24) |
5832                     (tp->dev->dev_addr[3] << 16) |
5833                     (tp->dev->dev_addr[4] <<  8) |
5834                     (tp->dev->dev_addr[5] <<  0));
5835         for (i = 0; i < 4; i++) {
5836                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5837                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5838         }
5839
5840         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5841             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5842                 for (i = 0; i < 12; i++) {
5843                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5844                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5845                 }
5846         }
5847
5848         addr_high = (tp->dev->dev_addr[0] +
5849                      tp->dev->dev_addr[1] +
5850                      tp->dev->dev_addr[2] +
5851                      tp->dev->dev_addr[3] +
5852                      tp->dev->dev_addr[4] +
5853                      tp->dev->dev_addr[5]) &
5854                 TX_BACKOFF_SEED_MASK;
5855         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5856 }
5857
5858 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5859 {
5860         struct tg3 *tp = netdev_priv(dev);
5861         struct sockaddr *addr = p;
5862         int err = 0;
5863
5864         if (!is_valid_ether_addr(addr->sa_data))
5865                 return -EINVAL;
5866
5867         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5868
5869         if (!netif_running(dev))
5870                 return 0;
5871
5872         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5873                 /* Reset chip so that ASF can re-init any MAC addresses it
5874                  * needs.
5875                  */
5876                 tg3_netif_stop(tp);
5877                 tg3_full_lock(tp, 1);
5878
5879                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5880                 err = tg3_restart_hw(tp, 0);
5881                 if (!err)
5882                         tg3_netif_start(tp);
5883                 tg3_full_unlock(tp);
5884         } else {
5885                 spin_lock_bh(&tp->lock);
5886                 __tg3_set_mac_addr(tp);
5887                 spin_unlock_bh(&tp->lock);
5888         }
5889
5890         return err;
5891 }
5892
5893 /* tp->lock is held. */
5894 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5895                            dma_addr_t mapping, u32 maxlen_flags,
5896                            u32 nic_addr)
5897 {
5898         tg3_write_mem(tp,
5899                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5900                       ((u64) mapping >> 32));
5901         tg3_write_mem(tp,
5902                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5903                       ((u64) mapping & 0xffffffff));
5904         tg3_write_mem(tp,
5905                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5906                        maxlen_flags);
5907
5908         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5909                 tg3_write_mem(tp,
5910                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5911                               nic_addr);
5912 }
5913
static void __tg3_set_rx_mode(struct net_device *);

/* Program the host coalescing engine from an ethtool_coalesce
 * description.  Caller holds tp->lock.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
        tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
        tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
        tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
        /* The per-interrupt tick registers are only written on
         * pre-5705 chips; 5705+ hardware is skipped here.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
        }
        tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
        tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                u32 val = ec->stats_block_coalesce_usecs;

                /* Force the statistics tick to zero while there is no
                 * carrier -- presumably to stop stats DMA while the
                 * link is down (see tg3_reset_hw's stats block setup).
                 */
                if (!netif_carrier_ok(tp->dev))
                        val = 0;

                tw32(HOSTCC_STAT_COAL_TICKS, val);
        }
}
5936
5937 /* tp->lock is held. */
5938 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5939 {
5940         u32 val, rdmac_mode;
5941         int i, err, limit;
5942
5943         tg3_disable_ints(tp);
5944
5945         tg3_stop_fw(tp);
5946
5947         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5948
5949         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5950                 tg3_abort_hw(tp, 1);
5951         }
5952
5953         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5954                 tg3_phy_reset(tp);
5955
5956         err = tg3_chip_reset(tp);
5957         if (err)
5958                 return err;
5959
5960         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5961
5962         /* This works around an issue with Athlon chipsets on
5963          * B3 tigon3 silicon.  This bit has no effect on any
5964          * other revision.  But do not set this on PCI Express
5965          * chips.
5966          */
5967         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5968                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5969         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5970
5971         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5972             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5973                 val = tr32(TG3PCI_PCISTATE);
5974                 val |= PCISTATE_RETRY_SAME_DMA;
5975                 tw32(TG3PCI_PCISTATE, val);
5976         }
5977
5978         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5979                 /* Enable some hw fixes.  */
5980                 val = tr32(TG3PCI_MSI_DATA);
5981                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5982                 tw32(TG3PCI_MSI_DATA, val);
5983         }
5984
5985         /* Descriptor ring init may make accesses to the
5986          * NIC SRAM area to setup the TX descriptors, so we
5987          * can only do this after the hardware has been
5988          * successfully reset.
5989          */
5990         err = tg3_init_rings(tp);
5991         if (err)
5992                 return err;
5993
5994         /* This value is determined during the probe time DMA
5995          * engine test, tg3_test_dma.
5996          */
5997         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5998
5999         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6000                           GRC_MODE_4X_NIC_SEND_RINGS |
6001                           GRC_MODE_NO_TX_PHDR_CSUM |
6002                           GRC_MODE_NO_RX_PHDR_CSUM);
6003         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6004
6005         /* Pseudo-header checksum is done by hardware logic and not
6006          * the offload processers, so make the chip do the pseudo-
6007          * header checksums on receive.  For transmit it is more
6008          * convenient to do the pseudo-header checksum in software
6009          * as Linux does that on transmit for us in all cases.
6010          */
6011         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6012
6013         tw32(GRC_MODE,
6014              tp->grc_mode |
6015              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6016
6017         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6018         val = tr32(GRC_MISC_CFG);
6019         val &= ~0xff;
6020         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6021         tw32(GRC_MISC_CFG, val);
6022
6023         /* Initialize MBUF/DESC pool. */
6024         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6025                 /* Do nothing.  */
6026         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6027                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6028                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6029                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6030                 else
6031                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6032                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6033                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6034         }
6035 #if TG3_TSO_SUPPORT != 0
6036         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6037                 int fw_len;
6038
6039                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6040                           TG3_TSO5_FW_RODATA_LEN +
6041                           TG3_TSO5_FW_DATA_LEN +
6042                           TG3_TSO5_FW_SBSS_LEN +
6043                           TG3_TSO5_FW_BSS_LEN);
6044                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6045                 tw32(BUFMGR_MB_POOL_ADDR,
6046                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6047                 tw32(BUFMGR_MB_POOL_SIZE,
6048                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6049         }
6050 #endif
6051
6052         if (tp->dev->mtu <= ETH_DATA_LEN) {
6053                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6054                      tp->bufmgr_config.mbuf_read_dma_low_water);
6055                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6056                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6057                 tw32(BUFMGR_MB_HIGH_WATER,
6058                      tp->bufmgr_config.mbuf_high_water);
6059         } else {
6060                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6061                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6062                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6063                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6064                 tw32(BUFMGR_MB_HIGH_WATER,
6065                      tp->bufmgr_config.mbuf_high_water_jumbo);
6066         }
6067         tw32(BUFMGR_DMA_LOW_WATER,
6068              tp->bufmgr_config.dma_low_water);
6069         tw32(BUFMGR_DMA_HIGH_WATER,
6070              tp->bufmgr_config.dma_high_water);
6071
6072         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6073         for (i = 0; i < 2000; i++) {
6074                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6075                         break;
6076                 udelay(10);
6077         }
6078         if (i >= 2000) {
6079                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6080                        tp->dev->name);
6081                 return -ENODEV;
6082         }
6083
6084         /* Setup replenish threshold. */
6085         val = tp->rx_pending / 8;
6086         if (val == 0)
6087                 val = 1;
6088         else if (val > tp->rx_std_max_post)
6089                 val = tp->rx_std_max_post;
6090
6091         tw32(RCVBDI_STD_THRESH, val);
6092
6093         /* Initialize TG3_BDINFO's at:
6094          *  RCVDBDI_STD_BD:     standard eth size rx ring
6095          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6096          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6097          *
6098          * like so:
6099          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6100          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6101          *                              ring attribute flags
6102          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6103          *
6104          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6105          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6106          *
6107          * The size of each ring is fixed in the firmware, but the location is
6108          * configurable.
6109          */
6110         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6111              ((u64) tp->rx_std_mapping >> 32));
6112         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6113              ((u64) tp->rx_std_mapping & 0xffffffff));
6114         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6115              NIC_SRAM_RX_BUFFER_DESC);
6116
6117         /* Don't even try to program the JUMBO/MINI buffer descriptor
6118          * configs on 5705.
6119          */
6120         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6121                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6122                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6123         } else {
6124                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6125                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6126
6127                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6128                      BDINFO_FLAGS_DISABLED);
6129
6130                 /* Setup replenish threshold. */
6131                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6132
6133                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6134                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6135                              ((u64) tp->rx_jumbo_mapping >> 32));
6136                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6137                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6138                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6139                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6140                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6141                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6142                 } else {
6143                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6144                              BDINFO_FLAGS_DISABLED);
6145                 }
6146
6147         }
6148
6149         /* There is only one send ring on 5705/5750, no need to explicitly
6150          * disable the others.
6151          */
6152         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6153                 /* Clear out send RCB ring in SRAM. */
6154                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6155                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6156                                       BDINFO_FLAGS_DISABLED);
6157         }
6158
6159         tp->tx_prod = 0;
6160         tp->tx_cons = 0;
6161         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6162         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6163
6164         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6165                        tp->tx_desc_mapping,
6166                        (TG3_TX_RING_SIZE <<
6167                         BDINFO_FLAGS_MAXLEN_SHIFT),
6168                        NIC_SRAM_TX_BUFFER_DESC);
6169
6170         /* There is only one receive return ring on 5705/5750, no need
6171          * to explicitly disable the others.
6172          */
6173         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6174                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6175                      i += TG3_BDINFO_SIZE) {
6176                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6177                                       BDINFO_FLAGS_DISABLED);
6178                 }
6179         }
6180
6181         tp->rx_rcb_ptr = 0;
6182         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6183
6184         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6185                        tp->rx_rcb_mapping,
6186                        (TG3_RX_RCB_RING_SIZE(tp) <<
6187                         BDINFO_FLAGS_MAXLEN_SHIFT),
6188                        0);
6189
6190         tp->rx_std_ptr = tp->rx_pending;
6191         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6192                      tp->rx_std_ptr);
6193
6194         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6195                                                 tp->rx_jumbo_pending : 0;
6196         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6197                      tp->rx_jumbo_ptr);
6198
6199         /* Initialize MAC address and backoff seed. */
6200         __tg3_set_mac_addr(tp);
6201
6202         /* MTU + ethernet header + FCS + optional VLAN tag */
6203         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6204
6205         /* The slot time is changed by tg3_setup_phy if we
6206          * run at gigabit with half duplex.
6207          */
6208         tw32(MAC_TX_LENGTHS,
6209              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6210              (6 << TX_LENGTHS_IPG_SHIFT) |
6211              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6212
6213         /* Receive rules. */
6214         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6215         tw32(RCVLPC_CONFIG, 0x0181);
6216
6217         /* Calculate RDMAC_MODE setting early, we need it to determine
6218          * the RCVLPC_STATE_ENABLE mask.
6219          */
6220         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6221                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6222                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6223                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6224                       RDMAC_MODE_LNGREAD_ENAB);
6225         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6226                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6227
6228         /* If statement applies to 5705 and 5750 PCI devices only */
6229         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6230              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6231             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6232                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6233                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6234                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6235                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6236                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6237                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6238                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6239                 }
6240         }
6241
6242         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6243                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6244
6245 #if TG3_TSO_SUPPORT != 0
6246         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6247                 rdmac_mode |= (1 << 27);
6248 #endif
6249
6250         /* Receive/send statistics. */
6251         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6252                 val = tr32(RCVLPC_STATS_ENABLE);
6253                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6254                 tw32(RCVLPC_STATS_ENABLE, val);
6255         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6256                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6257                 val = tr32(RCVLPC_STATS_ENABLE);
6258                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6259                 tw32(RCVLPC_STATS_ENABLE, val);
6260         } else {
6261                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6262         }
6263         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6264         tw32(SNDDATAI_STATSENAB, 0xffffff);
6265         tw32(SNDDATAI_STATSCTRL,
6266              (SNDDATAI_SCTRL_ENABLE |
6267               SNDDATAI_SCTRL_FASTUPD));
6268
6269         /* Setup host coalescing engine. */
6270         tw32(HOSTCC_MODE, 0);
6271         for (i = 0; i < 2000; i++) {
6272                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6273                         break;
6274                 udelay(10);
6275         }
6276
6277         __tg3_set_coalesce(tp, &tp->coal);
6278
6279         /* set status block DMA address */
6280         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6281              ((u64) tp->status_mapping >> 32));
6282         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6283              ((u64) tp->status_mapping & 0xffffffff));
6284
6285         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6286                 /* Status/statistics block address.  See tg3_timer,
6287                  * the tg3_periodic_fetch_stats call there, and
6288                  * tg3_get_stats to see how this works for 5705/5750 chips.
6289                  */
6290                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6291                      ((u64) tp->stats_mapping >> 32));
6292                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6293                      ((u64) tp->stats_mapping & 0xffffffff));
6294                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6295                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6296         }
6297
6298         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6299
6300         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6301         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6302         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6303                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6304
6305         /* Clear statistics/status block in chip, and status block in ram. */
6306         for (i = NIC_SRAM_STATS_BLK;
6307              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6308              i += sizeof(u32)) {
6309                 tg3_write_mem(tp, i, 0);
6310                 udelay(40);
6311         }
6312         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6313
6314         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6315                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6316                 /* reset to prevent losing 1st rx packet intermittently */
6317                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6318                 udelay(10);
6319         }
6320
6321         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6322                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6323         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6324         udelay(40);
6325
6326         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6327          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6328          * register to preserve the GPIO settings for LOMs. The GPIOs,
6329          * whether used as inputs or outputs, are set by boot code after
6330          * reset.
6331          */
6332         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6333                 u32 gpio_mask;
6334
6335                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6336                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6337
6338                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6339                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6340                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6341
6342                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6343                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6344
6345                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6346
6347                 /* GPIO1 must be driven high for eeprom write protect */
6348                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6349                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6350         }
6351         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6352         udelay(100);
6353
6354         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6355         tp->last_tag = 0;
6356
6357         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6358                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6359                 udelay(40);
6360         }
6361
6362         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6363                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6364                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6365                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6366                WDMAC_MODE_LNGREAD_ENAB);
6367
6368         /* If statement applies to 5705 and 5750 PCI devices only */
6369         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6370              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6371             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6372                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6373                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6374                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6375                         /* nothing */
6376                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6377                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6378                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6379                         val |= WDMAC_MODE_RX_ACCEL;
6380                 }
6381         }
6382
6383         /* Enable host coalescing bug fix */
6384         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6385             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6386                 val |= (1 << 29);
6387
6388         tw32_f(WDMAC_MODE, val);
6389         udelay(40);
6390
6391         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6392                 val = tr32(TG3PCI_X_CAPS);
6393                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6394                         val &= ~PCIX_CAPS_BURST_MASK;
6395                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6396                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6397                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6398                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6399                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6400                                 val |= (tp->split_mode_max_reqs <<
6401                                         PCIX_CAPS_SPLIT_SHIFT);
6402                 }
6403                 tw32(TG3PCI_X_CAPS, val);
6404         }
6405
6406         tw32_f(RDMAC_MODE, rdmac_mode);
6407         udelay(40);
6408
6409         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6410         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6411                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6412         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6413         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6414         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6415         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6416         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6417 #if TG3_TSO_SUPPORT != 0
6418         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6419                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6420 #endif
6421         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6422         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6423
6424         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6425                 err = tg3_load_5701_a0_firmware_fix(tp);
6426                 if (err)
6427                         return err;
6428         }
6429
6430 #if TG3_TSO_SUPPORT != 0
6431         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6432                 err = tg3_load_tso_firmware(tp);
6433                 if (err)
6434                         return err;
6435         }
6436 #endif
6437
6438         tp->tx_mode = TX_MODE_ENABLE;
6439         tw32_f(MAC_TX_MODE, tp->tx_mode);
6440         udelay(100);
6441
6442         tp->rx_mode = RX_MODE_ENABLE;
6443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6444                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6445
6446         tw32_f(MAC_RX_MODE, tp->rx_mode);
6447         udelay(10);
6448
6449         if (tp->link_config.phy_is_low_power) {
6450                 tp->link_config.phy_is_low_power = 0;
6451                 tp->link_config.speed = tp->link_config.orig_speed;
6452                 tp->link_config.duplex = tp->link_config.orig_duplex;
6453                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6454         }
6455
6456         tp->mi_mode = MAC_MI_MODE_BASE;
6457         tw32_f(MAC_MI_MODE, tp->mi_mode);
6458         udelay(80);
6459
6460         tw32(MAC_LED_CTRL, tp->led_ctrl);
6461
6462         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6463         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6464                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6465                 udelay(10);
6466         }
6467         tw32_f(MAC_RX_MODE, tp->rx_mode);
6468         udelay(10);
6469
6470         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6471                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6472                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6473                         /* Set drive transmission level to 1.2V  */
6474                         /* only if the signal pre-emphasis bit is not set  */
6475                         val = tr32(MAC_SERDES_CFG);
6476                         val &= 0xfffff000;
6477                         val |= 0x880;
6478                         tw32(MAC_SERDES_CFG, val);
6479                 }
6480                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6481                         tw32(MAC_SERDES_CFG, 0x616000);
6482         }
6483
6484         /* Prevent chip from dropping frames when flow control
6485          * is enabled.
6486          */
6487         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6488
6489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6490             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6491                 /* Use hardware link auto-negotiation */
6492                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6493         }
6494
6495         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6496             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6497                 u32 tmp;
6498
6499                 tmp = tr32(SERDES_RX_CTRL);
6500                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6501                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6502                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6503                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6504         }
6505
6506         err = tg3_setup_phy(tp, reset_phy);
6507         if (err)
6508                 return err;
6509
6510         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6511                 u32 tmp;
6512
6513                 /* Clear CRC stats. */
6514                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6515                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6516                         tg3_readphy(tp, 0x14, &tmp);
6517                 }
6518         }
6519
6520         __tg3_set_rx_mode(tp->dev);
6521
6522         /* Initialize receive rules. */
6523         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6524         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6525         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6526         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6527
6528         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6529             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6530                 limit = 8;
6531         else
6532                 limit = 16;
6533         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6534                 limit -= 4;
6535         switch (limit) {
6536         case 16:
6537                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6538         case 15:
6539                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6540         case 14:
6541                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6542         case 13:
6543                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6544         case 12:
6545                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6546         case 11:
6547                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6548         case 10:
6549                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6550         case 9:
6551                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6552         case 8:
6553                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6554         case 7:
6555                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6556         case 6:
6557                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6558         case 5:
6559                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6560         case 4:
6561                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6562         case 3:
6563                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6564         case 2:
6565         case 1:
6566
6567         default:
6568                 break;
6569         };
6570
6571         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6572
6573         return 0;
6574 }
6575
6576 /* Called at device open time to get the chip ready for
6577  * packet processing.  Invoked with tp->lock held.
6578  */
6579 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6580 {
6581         int err;
6582
6583         /* Force the chip into D0. */
6584         err = tg3_set_power_state(tp, PCI_D0);
6585         if (err)
6586                 goto out;
6587
6588         tg3_switch_clocks(tp);
6589
6590         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6591
6592         err = tg3_reset_hw(tp, reset_phy);
6593
6594 out:
6595         return err;
6596 }
6597
/* Accumulate the 32-bit hardware statistics register REG into the
 * 64-bit software counter PSTAT (a low/high pair).  The low word is
 * summed; if the addition wrapped (new low < value added), carry one
 * into the high word.  Each expansion reads the register via tr32().
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6604
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * counters into the 64-bit software copies in tp->hw_stats.  Called
 * from the once-per-second portion of tg3_timer() (5705-plus chips
 * only).  Does nothing while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters (BD starvation/discards). */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6645
/* Driver watchdog, invoked from a self-rearming kernel timer every
 * tp->timer_offset jiffies.  Performs three duties under tp->lock:
 * the non-tagged-status interrupt race workaround (every tick), link
 * polling and statistics collection (once per second, gated by
 * tp->timer_counter), and the ASF firmware heartbeat (once per two
 * seconds, gated by tp->asf_counter).
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While interrupt synchronization is in progress, do no work
	 * but keep the timer alive.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated: force an interrupt so the
			 * update gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick host coalescing to emit a fresh
			 * status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine has disabled itself, schedule a
		 * full reset from process context and return WITHOUT
		 * rearming the timer here; TG3_FLG2_RESTART_TIMER is set
		 * so the reset path can restart it (presumably in
		 * tg3_reset_task — confirm).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll link state via the MAC status register
			 * (or the MII interrupt bit when in use).
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and changed state... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ...or link was down and PCS sync / signal
			 * detect now indicates a partner.
			 */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before
				 * renegotiating.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.  */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive, with a
			 * 5 second timeout, then raise the RX CPU event.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6747
6748 static int tg3_request_irq(struct tg3 *tp)
6749 {
6750         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6751         unsigned long flags;
6752         struct net_device *dev = tp->dev;
6753
6754         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6755                 fn = tg3_msi;
6756                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6757                         fn = tg3_msi_1shot;
6758                 flags = IRQF_SAMPLE_RANDOM;
6759         } else {
6760                 fn = tg3_interrupt;
6761                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6762                         fn = tg3_interrupt_tagged;
6763                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6764         }
6765         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6766 }
6767
6768 static int tg3_test_interrupt(struct tg3 *tp)
6769 {
6770         struct net_device *dev = tp->dev;
6771         int err, i;
6772         u32 int_mbox = 0;
6773
6774         if (!netif_running(dev))
6775                 return -ENODEV;
6776
6777         tg3_disable_ints(tp);
6778
6779         free_irq(tp->pdev->irq, dev);
6780
6781         err = request_irq(tp->pdev->irq, tg3_test_isr,
6782                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6783         if (err)
6784                 return err;
6785
6786         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6787         tg3_enable_ints(tp);
6788
6789         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6790                HOSTCC_MODE_NOW);
6791
6792         for (i = 0; i < 5; i++) {
6793                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6794                                         TG3_64BIT_REG_LOW);
6795                 if (int_mbox != 0)
6796                         break;
6797                 msleep(10);
6798         }
6799
6800         tg3_disable_ints(tp);
6801
6802         free_irq(tp->pdev->irq, dev);
6803         
6804         err = tg3_request_irq(tp);
6805
6806         if (err)
6807                 return err;
6808
6809         if (int_mbox != 0)
6810                 return 0;
6811
6812         return -EIO;
6813 }
6814
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other negative errno means the device
 * is in an unusable state (IRQ freed on the final error path).
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when MSI is not in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	/* Release the MSI vector before disabling MSI on the device. */
	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-register with the INTx handler selected by the cleared flag. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6875
6876 static int tg3_open(struct net_device *dev)
6877 {
6878         struct tg3 *tp = netdev_priv(dev);
6879         int err;
6880
6881         tg3_full_lock(tp, 0);
6882
6883         err = tg3_set_power_state(tp, PCI_D0);
6884         if (err)
6885                 return err;
6886
6887         tg3_disable_ints(tp);
6888         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6889
6890         tg3_full_unlock(tp);
6891
6892         /* The placement of this call is tied
6893          * to the setup and use of Host TX descriptors.
6894          */
6895         err = tg3_alloc_consistent(tp);
6896         if (err)
6897                 return err;
6898
6899         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6900             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6901             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6902             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6903               (tp->pdev_peer == tp->pdev))) {
6904                 /* All MSI supporting chips should support tagged
6905                  * status.  Assert that this is the case.
6906                  */
6907                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6908                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6909                                "Not using MSI.\n", tp->dev->name);
6910                 } else if (pci_enable_msi(tp->pdev) == 0) {
6911                         u32 msi_mode;
6912
6913                         msi_mode = tr32(MSGINT_MODE);
6914                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6915                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6916                 }
6917         }
6918         err = tg3_request_irq(tp);
6919
6920         if (err) {
6921                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6922                         pci_disable_msi(tp->pdev);
6923                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6924                 }
6925                 tg3_free_consistent(tp);
6926                 return err;
6927         }
6928
6929         tg3_full_lock(tp, 0);
6930
6931         err = tg3_init_hw(tp, 1);
6932         if (err) {
6933                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6934                 tg3_free_rings(tp);
6935         } else {
6936                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6937                         tp->timer_offset = HZ;
6938                 else
6939                         tp->timer_offset = HZ / 10;
6940
6941                 BUG_ON(tp->timer_offset > HZ);
6942                 tp->timer_counter = tp->timer_multiplier =
6943                         (HZ / tp->timer_offset);
6944                 tp->asf_counter = tp->asf_multiplier =
6945                         ((HZ / tp->timer_offset) * 2);
6946
6947                 init_timer(&tp->timer);
6948                 tp->timer.expires = jiffies + tp->timer_offset;
6949                 tp->timer.data = (unsigned long) tp;
6950                 tp->timer.function = tg3_timer;
6951         }
6952
6953         tg3_full_unlock(tp);
6954
6955         if (err) {
6956                 free_irq(tp->pdev->irq, dev);
6957                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6958                         pci_disable_msi(tp->pdev);
6959                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6960                 }
6961                 tg3_free_consistent(tp);
6962                 return err;
6963         }
6964
6965         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6966                 err = tg3_test_msi(tp);
6967
6968                 if (err) {
6969                         tg3_full_lock(tp, 0);
6970
6971                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6972                                 pci_disable_msi(tp->pdev);
6973                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6974                         }
6975                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6976                         tg3_free_rings(tp);
6977                         tg3_free_consistent(tp);
6978
6979                         tg3_full_unlock(tp);
6980
6981                         return err;
6982                 }
6983
6984                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6985                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6986                                 u32 val = tr32(0x7c04);
6987
6988                                 tw32(0x7c04, val | (1 << 29));
6989                         }
6990                 }
6991         }
6992
6993         tg3_full_lock(tp, 0);
6994
6995         add_timer(&tp->timer);
6996         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6997         tg3_enable_ints(tp);
6998
6999         tg3_full_unlock(tp);
7000
7001         netif_start_queue(dev);
7002
7003         return 0;
7004 }
7005
7006 #if 0
7007 /*static*/ void tg3_dump_state(struct tg3 *tp)
7008 {
7009         u32 val32, val32_2, val32_3, val32_4, val32_5;
7010         u16 val16;
7011         int i;
7012
7013         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7014         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7015         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7016                val16, val32);
7017
7018         /* MAC block */
7019         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7020                tr32(MAC_MODE), tr32(MAC_STATUS));
7021         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7022                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7023         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7024                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7025         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7026                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7027
7028         /* Send data initiator control block */
7029         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7030                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7031         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7032                tr32(SNDDATAI_STATSCTRL));
7033
7034         /* Send data completion control block */
7035         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7036
7037         /* Send BD ring selector block */
7038         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7039                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7040
7041         /* Send BD initiator control block */
7042         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7043                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7044
7045         /* Send BD completion control block */
7046         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7047
7048         /* Receive list placement control block */
7049         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7050                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7051         printk("       RCVLPC_STATSCTRL[%08x]\n",
7052                tr32(RCVLPC_STATSCTRL));
7053
7054         /* Receive data and receive BD initiator control block */
7055         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7056                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7057
7058         /* Receive data completion control block */
7059         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7060                tr32(RCVDCC_MODE));
7061
7062         /* Receive BD initiator control block */
7063         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7064                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7065
7066         /* Receive BD completion control block */
7067         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7068                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7069
7070         /* Receive list selector control block */
7071         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7072                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7073
7074         /* Mbuf cluster free block */
7075         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7076                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7077
7078         /* Host coalescing control block */
7079         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7080                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7081         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7082                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7083                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7084         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7085                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7086                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7087         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7088                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7089         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7090                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7091
7092         /* Memory arbiter control block */
7093         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7094                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7095
7096         /* Buffer manager control block */
7097         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7098                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7099         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7100                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7101         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7102                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7103                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7104                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7105
7106         /* Read DMA control block */
7107         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7108                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7109
7110         /* Write DMA control block */
7111         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7112                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7113
7114         /* DMA completion block */
7115         printk("DEBUG: DMAC_MODE[%08x]\n",
7116                tr32(DMAC_MODE));
7117
7118         /* GRC block */
7119         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7120                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7121         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7122                tr32(GRC_LOCAL_CTRL));
7123
7124         /* TG3_BDINFOs */
7125         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7126                tr32(RCVDBDI_JUMBO_BD + 0x0),
7127                tr32(RCVDBDI_JUMBO_BD + 0x4),
7128                tr32(RCVDBDI_JUMBO_BD + 0x8),
7129                tr32(RCVDBDI_JUMBO_BD + 0xc));
7130         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7131                tr32(RCVDBDI_STD_BD + 0x0),
7132                tr32(RCVDBDI_STD_BD + 0x4),
7133                tr32(RCVDBDI_STD_BD + 0x8),
7134                tr32(RCVDBDI_STD_BD + 0xc));
7135         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7136                tr32(RCVDBDI_MINI_BD + 0x0),
7137                tr32(RCVDBDI_MINI_BD + 0x4),
7138                tr32(RCVDBDI_MINI_BD + 0x8),
7139                tr32(RCVDBDI_MINI_BD + 0xc));
7140
7141         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7142         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7143         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7144         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7145         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7146                val32, val32_2, val32_3, val32_4);
7147
7148         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7149         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7150         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7151         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7152         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7153                val32, val32_2, val32_3, val32_4);
7154
7155         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7156         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7157         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7158         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7159         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7160         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7161                val32, val32_2, val32_3, val32_4, val32_5);
7162
7163         /* SW status block */
7164         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7165                tp->hw_status->status,
7166                tp->hw_status->status_tag,
7167                tp->hw_status->rx_jumbo_consumer,
7168                tp->hw_status->rx_consumer,
7169                tp->hw_status->rx_mini_consumer,
7170                tp->hw_status->idx[0].rx_producer,
7171                tp->hw_status->idx[0].tx_consumer);
7172
7173         /* SW statistics block */
7174         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7175                ((u32 *)tp->hw_stats)[0],
7176                ((u32 *)tp->hw_stats)[1],
7177                ((u32 *)tp->hw_stats)[2],
7178                ((u32 *)tp->hw_stats)[3]);
7179
7180         /* Mailboxes */
7181         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7182                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7183                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7184                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7185                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7186
7187         /* NIC side send descriptors. */
7188         for (i = 0; i < 6; i++) {
7189                 unsigned long txd;
7190
7191                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7192                         + (i * sizeof(struct tg3_tx_buffer_desc));
7193                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7194                        i,
7195                        readl(txd + 0x0), readl(txd + 0x4),
7196                        readl(txd + 0x8), readl(txd + 0xc));
7197         }
7198
7199         /* NIC side RX descriptors. */
7200         for (i = 0; i < 6; i++) {
7201                 unsigned long rxd;
7202
7203                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7204                         + (i * sizeof(struct tg3_rx_buffer_desc));
7205                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7206                        i,
7207                        readl(rxd + 0x0), readl(rxd + 0x4),
7208                        readl(rxd + 0x8), readl(rxd + 0xc));
7209                 rxd += (4 * sizeof(u32));
7210                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7211                        i,
7212                        readl(rxd + 0x0), readl(rxd + 0x4),
7213                        readl(rxd + 0x8), readl(rxd + 0xc));
7214         }
7215
7216         for (i = 0; i < 6; i++) {
7217                 unsigned long rxd;
7218
7219                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7220                         + (i * sizeof(struct tg3_rx_buffer_desc));
7221                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7222                        i,
7223                        readl(rxd + 0x0), readl(rxd + 0x4),
7224                        readl(rxd + 0x8), readl(rxd + 0xc));
7225                 rxd += (4 * sizeof(u32));
7226                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7227                        i,
7228                        readl(rxd + 0x0), readl(rxd + 0x4),
7229                        readl(rxd + 0x8), readl(rxd + 0xc));
7230         }
7231 }
7232 #endif
7233
7234 static struct net_device_stats *tg3_get_stats(struct net_device *);
7235 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7236
/* net_device close (ifconfig down) handler: quiesce the chip, release
 * rings, IRQ and DMA memory, then drop the device into D3hot.
 * Returns 0.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
                msleep(1);

        netif_stop_queue(dev);

        /* Stop the periodic timer before tearing down hardware state. */
        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        /* Halt the chip and free ring buffers under the full lock. */
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &=
                ~(TG3_FLAG_INIT_COMPLETE |
                  TG3_FLAG_GOT_SERDES_FLOWCTL);

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Snapshot totals BEFORE tg3_free_consistent() releases
         * tp->hw_stats; the *_prev copies seed the counters reported
         * while the interface is down and after it is reopened.
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
7286
7287 static inline unsigned long get_stat64(tg3_stat64_t *val)
7288 {
7289         unsigned long ret;
7290
7291 #if (BITS_PER_LONG == 32)
7292         ret = val->low;
7293 #else
7294         ret = ((u64)val->high << 32) | ((u64)val->low);
7295 #endif
7296         return ret;
7297 }
7298
/* Return the cumulative CRC-error count.  On 5700/5701 copper parts the
 * count is taken from the PHY instead of the MAC's rx_fcs_errors
 * counter and accumulated in software (tp->phy_crc_errors).
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, 0x1e, &val)) {
                        /* NOTE(review): setting bit 15 of PHY reg 0x1e
                         * appears to expose the CRC counter at reg 0x14
                         * -- confirm against the Broadcom PHY datasheet.
                         */
                        tg3_writephy(tp, 0x1e, val | 0x8000);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;        /* PHY unreadable: add nothing */
                spin_unlock_bh(&tp->lock);

                /* Running total is kept in software; the PHY counter
                 * presumably clears on read -- TODO confirm.
                 */
                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
7323
/* Add the live hardware counter for @member to the snapshot saved at
 * the last close (old_estats), producing a monotonically increasing
 * total in estats.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Fill and return tp->estats, the ethtool statistics block.  When the
 * DMA stats block is unmapped (device closed), return the snapshot
 * taken at close time instead.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        /* Receive-side MAC counters */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side MAC counters */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Receive list placement state machine counters */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        /* Send data initiator counters */
        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host coalescing counters */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
7415
/* net_device get_stats handler: fill and return tp->net_stats.  Each
 * field is the snapshot taken at the last close plus the live hardware
 * counters; when the stats block is unmapped (device closed), only the
 * snapshot is returned.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        /* The MAC keeps separate unicast/multicast/broadcast counters;
         * the net core wants one total.
         */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on some chips; see
         * calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
7475
7476 static inline u32 calc_crc(unsigned char *buf, int len)
7477 {
7478         u32 reg;
7479         u32 tmp;
7480         int j, k;
7481
7482         reg = 0xffffffff;
7483
7484         for (j = 0; j < len; j++) {
7485                 reg ^= buf[j];
7486
7487                 for (k = 0; k < 8; k++) {
7488                         tmp = reg & 0x01;
7489
7490                         reg >>= 1;
7491
7492                         if (tmp) {
7493                                 reg ^= 0xedb88320;
7494                         }
7495                 }
7496         }
7497
7498         return ~reg;
7499 }
7500
7501 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7502 {
7503         /* accept or reject all multicast frames */
7504         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7505         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7506         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7507         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7508 }
7509
/* Program the receive filters (promiscuous bit, VLAN tag stripping and
 * the multicast hash) from dev->flags and the device multicast list.
 * Callers hold tg3_full_lock() -- see tg3_set_rx_mode().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        /* Hash index = low 7 bits of the inverted CRC:
                         * bits 6:5 select one of the four 32-bit hash
                         * registers, bits 4:0 the bit within it.
                         */
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch MAC_RX_MODE when a bit actually changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
7573
/* net_device set_rx_mode entry point: take the full lock and apply the
 * current RX filtering state.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
7585
7586 #define TG3_REGDUMP_LEN         (32 * 1024)
7587
/* ethtool get_regs_len: the register dump buffer is always
 * TG3_REGDUMP_LEN (32 kB), independent of chip revision.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REGDUMP_LEN;
}
7592
/* ethtool get_regs: copy device registers into the 32 kB dump buffer.
 * Each register is stored at its native offset; the buffer is
 * pre-zeroed so unread gaps read back as 0.  Skipped entirely while
 * the PHY is in low power (NOTE(review): presumably because register
 * reads are unsafe in that state -- confirm).
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* __GET_REG32 reads one register into the cursor; the two wrappers
 * first reposition the cursor so values land at their register offset.
 */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        /* NVRAM registers only exist when the flash interface does. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
7665
/* ethtool get_eeprom_len: size of the NVRAM as probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
7672
7673 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7674 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7675
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is accessed in aligned 4-byte
 * words (converted with cpu_to_le32 before the byte copy), so a
 * misaligned head and tail are extracted from whole words.
 * eeprom->len is updated to the number of bytes actually copied.
 * Returns 0 or the tg3_nvram_read() error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, val, b_offset, b_count;

        /* NVRAM is not accessible while the chip is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes upto the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read(tp, offset + i, &val);
                if (ret) {
                        /* Report the bytes copied so far, then fail. */
                        eeprom->len += i;
                        return ret;
                }
                val = cpu_to_le32(val);
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read(tp, b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(pd, ((char*)&val), b_count);
                eeprom->len += b_count;
        }
        return 0;
}
7737
7738 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7739
7740 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7741 {
7742         struct tg3 *tp = netdev_priv(dev);
7743         int ret;
7744         u32 offset, len, b_offset, odd_len, start, end;
7745         u8 *buf;
7746
7747         if (tp->link_config.phy_is_low_power)
7748                 return -EAGAIN;
7749
7750         if (eeprom->magic != TG3_EEPROM_MAGIC)
7751                 return -EINVAL;
7752
7753         offset = eeprom->offset;
7754         len = eeprom->len;
7755
7756         if ((b_offset = (offset & 3))) {
7757                 /* adjustments to start on required 4 byte boundary */
7758                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7759                 if (ret)
7760                         return ret;
7761                 start = cpu_to_le32(start);
7762                 len += b_offset;
7763                 offset &= ~3;
7764                 if (len < 4)
7765                         len = 4;
7766         }
7767
7768         odd_len = 0;
7769         if (len & 3) {
7770                 /* adjustments to end on required 4 byte boundary */
7771                 odd_len = 1;
7772                 len = (len + 3) & ~3;
7773                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7774                 if (ret)
7775                         return ret;
7776                 end = cpu_to_le32(end);
7777         }
7778
7779         buf = data;
7780         if (b_offset || odd_len) {
7781                 buf = kmalloc(len, GFP_KERNEL);
7782                 if (buf == 0)
7783                         return -ENOMEM;
7784                 if (b_offset)
7785                         memcpy(buf, &start, 4);
7786                 if (odd_len)
7787                         memcpy(buf+len-4, &end, 4);
7788                 memcpy(buf + b_offset, data, eeprom->len);
7789         }
7790
7791         ret = tg3_nvram_write_block(tp, offset, len, buf);
7792
7793         if (buf != data)
7794                 kfree(buf);
7795
7796         return ret;
7797 }
7798
/* ethtool get_settings: report supported link modes, port type,
 * current advertisement and -- when the interface is running -- the
 * active speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        cmd->supported = (SUPPORTED_Autoneg);

        /* Gigabit modes, unless this is a 10/100-only device. */
        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper parts add 10/100 + MII; fiber parts only do FIBRE. */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_MII);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        /* Speed/duplex are only meaningful while the link is managed. */
        if (netif_running(dev)) {
                cmd->speed = tp->link_config.active_speed;
                cmd->duplex = tp->link_config.active_duplex;
        }
        cmd->phy_address = PHY_ADDR;
        cmd->transceiver = 0;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
7833   
7834 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7835 {
7836         struct tg3 *tp = netdev_priv(dev);
7837   
7838         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7839                 /* These are the only valid advertisement bits allowed.  */
7840                 if (cmd->autoneg == AUTONEG_ENABLE &&
7841                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7842                                           ADVERTISED_1000baseT_Full |
7843                                           ADVERTISED_Autoneg |
7844                                           ADVERTISED_FIBRE)))
7845                         return -EINVAL;
7846                 /* Fiber can only do SPEED_1000.  */
7847                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7848                          (cmd->speed != SPEED_1000))
7849                         return -EINVAL;
7850         /* Copper cannot force SPEED_1000.  */
7851         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7852                    (cmd->speed == SPEED_1000))
7853                 return -EINVAL;
7854         else if ((cmd->speed == SPEED_1000) &&
7855                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7856                 return -EINVAL;
7857
7858         tg3_full_lock(tp, 0);
7859
7860         tp->link_config.autoneg = cmd->autoneg;
7861         if (cmd->autoneg == AUTONEG_ENABLE) {
7862                 tp->link_config.advertising = cmd->advertising;
7863                 tp->link_config.speed = SPEED_INVALID;
7864                 tp->link_config.duplex = DUPLEX_INVALID;
7865         } else {
7866                 tp->link_config.advertising = 0;
7867                 tp->link_config.speed = cmd->speed;
7868                 tp->link_config.duplex = cmd->duplex;
7869         }
7870   
7871         if (netif_running(dev))
7872                 tg3_setup_phy(tp, 1);
7873
7874         tg3_full_unlock(tp);
7875   
7876         return 0;
7877 }
7878   
7879 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7880 {
7881         struct tg3 *tp = netdev_priv(dev);
7882   
7883         strcpy(info->driver, DRV_MODULE_NAME);
7884         strcpy(info->version, DRV_MODULE_VERSION);
7885         strcpy(info->fw_version, tp->fw_ver);
7886         strcpy(info->bus_info, pci_name(tp->pdev));
7887 }
7888   
7889 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7890 {
7891         struct tg3 *tp = netdev_priv(dev);
7892   
7893         wol->supported = WAKE_MAGIC;
7894         wol->wolopts = 0;
7895         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7896                 wol->wolopts = WAKE_MAGIC;
7897         memset(&wol->sopass, 0, sizeof(wol->sopass));
7898 }
7899   
7900 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7901 {
7902         struct tg3 *tp = netdev_priv(dev);
7903   
7904         if (wol->wolopts & ~WAKE_MAGIC)
7905                 return -EINVAL;
7906         if ((wol->wolopts & WAKE_MAGIC) &&
7907             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7908             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7909                 return -EINVAL;
7910   
7911         spin_lock_bh(&tp->lock);
7912         if (wol->wolopts & WAKE_MAGIC)
7913                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7914         else
7915                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7916         spin_unlock_bh(&tp->lock);
7917   
7918         return 0;
7919 }
7920   
/* ethtool get_msglevel: return the driver's debug message mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
7926   
/* ethtool set_msglevel: set the driver's debug message mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
7932   
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: toggle TCP segmentation offload.  Non-TSO chips may
 * only be asked to turn it off; HW_TSO_2 chips keep the IPv6 TSO
 * feature bit in step with the IPv4 one.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
                return value ? -EINVAL : 0;

        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
                if (value)
                        dev->features |= NETIF_F_TSO6;
                else
                        dev->features &= ~NETIF_F_TSO6;
        }

        return ethtool_op_set_tso(dev, value);
}
#endif
7952   
/* ethtool nway_reset: restart link autonegotiation on the copper PHY.
 * Fails with -EAGAIN if the interface is down and -EINVAL on SERDES
 * parts (no MII PHY to poke) or if autoneg is not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice here; the first read looks
	 * like a deliberate dummy read to flush stale PHY state — confirm
	 * against the PHY errata before removing it.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7979   
7980 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7981 {
7982         struct tg3 *tp = netdev_priv(dev);
7983   
7984         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7985         ering->rx_mini_max_pending = 0;
7986         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7987                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7988         else
7989                 ering->rx_jumbo_max_pending = 0;
7990
7991         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7992
7993         ering->rx_pending = tp->rx_pending;
7994         ering->rx_mini_pending = 0;
7995         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7996                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7997         else
7998                 ering->rx_jumbo_pending = 0;
7999
8000         ering->tx_pending = tp->tx_pending;
8001 }
8002   
8003 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8004 {
8005         struct tg3 *tp = netdev_priv(dev);
8006         int irq_sync = 0, err = 0;
8007   
8008         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8009             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8010             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8011                 return -EINVAL;
8012   
8013         if (netif_running(dev)) {
8014                 tg3_netif_stop(tp);
8015                 irq_sync = 1;
8016         }
8017
8018         tg3_full_lock(tp, irq_sync);
8019   
8020         tp->rx_pending = ering->rx_pending;
8021
8022         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8023             tp->rx_pending > 63)
8024                 tp->rx_pending = 63;
8025         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8026         tp->tx_pending = ering->tx_pending;
8027
8028         if (netif_running(dev)) {
8029                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8030                 err = tg3_restart_hw(tp, 1);
8031                 if (!err)
8032                         tg3_netif_start(tp);
8033         }
8034
8035         tg3_full_unlock(tp);
8036   
8037         return err;
8038 }
8039   
8040 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8041 {
8042         struct tg3 *tp = netdev_priv(dev);
8043   
8044         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8045         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8046         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8047 }
8048   
8049 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8050 {
8051         struct tg3 *tp = netdev_priv(dev);
8052         int irq_sync = 0, err = 0;
8053   
8054         if (netif_running(dev)) {
8055                 tg3_netif_stop(tp);
8056                 irq_sync = 1;
8057         }
8058
8059         tg3_full_lock(tp, irq_sync);
8060
8061         if (epause->autoneg)
8062                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8063         else
8064                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8065         if (epause->rx_pause)
8066                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8067         else
8068                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8069         if (epause->tx_pause)
8070                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8071         else
8072                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8073
8074         if (netif_running(dev)) {
8075                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8076                 err = tg3_restart_hw(tp, 1);
8077                 if (!err)
8078                         tg3_netif_start(tp);
8079         }
8080
8081         tg3_full_unlock(tp);
8082   
8083         return err;
8084 }
8085   
8086 static u32 tg3_get_rx_csum(struct net_device *dev)
8087 {
8088         struct tg3 *tp = netdev_priv(dev);
8089         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8090 }
8091   
8092 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8093 {
8094         struct tg3 *tp = netdev_priv(dev);
8095   
8096         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8097                 if (data != 0)
8098                         return -EINVAL;
8099                 return 0;
8100         }
8101   
8102         spin_lock_bh(&tp->lock);
8103         if (data)
8104                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8105         else
8106                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8107         spin_unlock_bh(&tp->lock);
8108   
8109         return 0;
8110 }
8111   
8112 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8113 {
8114         struct tg3 *tp = netdev_priv(dev);
8115   
8116         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8117                 if (data != 0)
8118                         return -EINVAL;
8119                 return 0;
8120         }
8121   
8122         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8123             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8124                 ethtool_op_set_tx_hw_csum(dev, data);
8125         else
8126                 ethtool_op_set_tx_csum(dev, data);
8127
8128         return 0;
8129 }
8130
8131 static int tg3_get_stats_count (struct net_device *dev)
8132 {
8133         return TG3_NUM_STATS;
8134 }
8135
8136 static int tg3_get_test_count (struct net_device *dev)
8137 {
8138         return TG3_NUM_TEST;
8139 }
8140
8141 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8142 {
8143         switch (stringset) {
8144         case ETH_SS_STATS:
8145                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8146                 break;
8147         case ETH_SS_TEST:
8148                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8149                 break;
8150         default:
8151                 WARN_ON(1);     /* we need a WARN() */
8152                 break;
8153         }
8154 }
8155
8156 static int tg3_phys_id(struct net_device *dev, u32 data)
8157 {
8158         struct tg3 *tp = netdev_priv(dev);
8159         int i;
8160
8161         if (!netif_running(tp->dev))
8162                 return -EAGAIN;
8163
8164         if (data == 0)
8165                 data = 2;
8166
8167         for (i = 0; i < (data * 2); i++) {
8168                 if ((i % 2) == 0)
8169                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8170                                            LED_CTRL_1000MBPS_ON |
8171                                            LED_CTRL_100MBPS_ON |
8172                                            LED_CTRL_10MBPS_ON |
8173                                            LED_CTRL_TRAFFIC_OVERRIDE |
8174                                            LED_CTRL_TRAFFIC_BLINK |
8175                                            LED_CTRL_TRAFFIC_LED);
8176         
8177                 else
8178                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8179                                            LED_CTRL_TRAFFIC_OVERRIDE);
8180
8181                 if (msleep_interruptible(500))
8182                         break;
8183         }
8184         tw32(MAC_LED_CTRL, tp->led_ctrl);
8185         return 0;
8186 }
8187
8188 static void tg3_get_ethtool_stats (struct net_device *dev,
8189                                    struct ethtool_stats *estats, u64 *tmp_stats)
8190 {
8191         struct tg3 *tp = netdev_priv(dev);
8192         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8193 }
8194
8195 #define NVRAM_TEST_SIZE 0x100
8196 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8197
/* Read the NVRAM image and verify its checksums.
 *
 * Legacy images (TG3_EEPROM_MAGIC) are checked with a CRC over the
 * bootstrap area (stored at offset 0x10) and over the manufacturing
 * block at 0x74 (stored at 0xfc).  Selfboot format-1 images are valid
 * when all their bytes sum to zero.  Returns 0 on success, -EIO on a
 * read failure or checksum mismatch, -ENOMEM if the scratch buffer
 * cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word in the first NVRAM word tells us which image
	 * format is present and therefore how much we must read.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unrecognized selfboot variant: skip */
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Selfboot image is valid when the byte sum wraps to zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8263
8264 #define TG3_SERDES_TIMEOUT_SEC  2
8265 #define TG3_COPPER_TIMEOUT_SEC  6
8266
8267 static int tg3_test_link(struct tg3 *tp)
8268 {
8269         int i, max;
8270
8271         if (!netif_running(tp->dev))
8272                 return -ENODEV;
8273
8274         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8275                 max = TG3_SERDES_TIMEOUT_SEC;
8276         else
8277                 max = TG3_COPPER_TIMEOUT_SEC;
8278
8279         for (i = 0; i < max; i++) {
8280                 if (netif_carrier_ok(tp->dev))
8281                         return 0;
8282
8283                 if (msleep_interruptible(1000))
8284                         break;
8285         }
8286
8287         return -EIO;
8288 }
8289
/* Only test the commonly used registers.
 *
 * For each table entry: save the register, write all-zeros and then
 * all-ones through the write mask, and verify that read-only bits
 * (read_mask) are preserved while read/write bits (write_mask) take
 * the written value.  The original contents are restored on both the
 * success and failure paths.  Returns 0 on success, -EIO on the first
 * mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC generation. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8502
8503 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8504 {
8505         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8506         int i;
8507         u32 j;
8508
8509         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8510                 for (j = 0; j < len; j += 4) {
8511                         u32 val;
8512
8513                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8514                         tg3_read_mem(tp, offset + j, &val);
8515                         if (val != test_pattern[i])
8516                                 return -EIO;
8517                 }
8518         }
8519         return 0;
8520 }
8521
8522 static int tg3_test_memory(struct tg3 *tp)
8523 {
8524         static struct mem_entry {
8525                 u32 offset;
8526                 u32 len;
8527         } mem_tbl_570x[] = {
8528                 { 0x00000000, 0x00b50},
8529                 { 0x00002000, 0x1c000},
8530                 { 0xffffffff, 0x00000}
8531         }, mem_tbl_5705[] = {
8532                 { 0x00000100, 0x0000c},
8533                 { 0x00000200, 0x00008},
8534                 { 0x00004000, 0x00800},
8535                 { 0x00006000, 0x01000},
8536                 { 0x00008000, 0x02000},
8537                 { 0x00010000, 0x0e000},
8538                 { 0xffffffff, 0x00000}
8539         }, mem_tbl_5755[] = {
8540                 { 0x00000200, 0x00008},
8541                 { 0x00004000, 0x00800},
8542                 { 0x00006000, 0x00800},
8543                 { 0x00008000, 0x02000},
8544                 { 0x00010000, 0x0c000},
8545                 { 0xffffffff, 0x00000}
8546         };
8547         struct mem_entry *mem_tbl;
8548         int err = 0;
8549         int i;
8550
8551         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8552                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8553                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8554                         mem_tbl = mem_tbl_5755;
8555                 else
8556                         mem_tbl = mem_tbl_5705;
8557         } else
8558                 mem_tbl = mem_tbl_570x;
8559
8560         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8561                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8562                     mem_tbl[i].len)) != 0)
8563                         break;
8564         }
8565         
8566         return err;
8567 }
8568
8569 #define TG3_MAC_LOOPBACK        0
8570 #define TG3_PHY_LOOPBACK        1
8571
/* Send a single frame through the chip in MAC-internal or PHY-internal
 * loopback mode and verify it arrives intact on the standard receive
 * ring.  Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if
 * the test skb cannot be allocated, -EIO on any transmit/receive/
 * verification failure.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, zero padding, then an
	 * incrementing byte pattern we can verify after the round trip.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the rx producer is so we can detect our frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the transmitter via the send-host producer mailbox. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll for the frame to be consumed on tx and looped back to rx. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: right ring, no errors,
	 * expected frame length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;
	
	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8705
8706 #define TG3_MAC_LOOPBACK_FAILED         1
8707 #define TG3_PHY_LOOPBACK_FAILED         2
8708 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8709                                          TG3_PHY_LOOPBACK_FAILED)
8710
8711 static int tg3_test_loopback(struct tg3 *tp)
8712 {
8713         int err = 0;
8714
8715         if (!netif_running(tp->dev))
8716                 return TG3_LOOPBACK_FAILED;
8717
8718         err = tg3_reset_hw(tp, 1);
8719         if (err)
8720                 return TG3_LOOPBACK_FAILED;
8721
8722         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8723                 err |= TG3_MAC_LOOPBACK_FAILED;
8724         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8725                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8726                         err |= TG3_PHY_LOOPBACK_FAILED;
8727         }
8728
8729         return err;
8730 }
8731
/* ethtool self_test: run the driver's diagnostic suite.
 *
 * The online tests (NVRAM, link) always run.  When the caller sets
 * ETH_TEST_FL_OFFLINE, the adapter is halted and the register, memory,
 * loopback and interrupt tests run as well, after which the hardware
 * is restarted.  Each data[] slot is set non-zero when the
 * corresponding test fails, and ETH_TEST_FL_FAILED is set in
 * etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip for the duration of the tests if it was asleep. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * (older parts also have a TX CPU).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test runs without the full lock held. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Put the hardware back into its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8804
8805 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8806 {
8807         struct mii_ioctl_data *data = if_mii(ifr);
8808         struct tg3 *tp = netdev_priv(dev);
8809         int err;
8810
8811         switch(cmd) {
8812         case SIOCGMIIPHY:
8813                 data->phy_id = PHY_ADDR;
8814
8815                 /* fallthru */
8816         case SIOCGMIIREG: {
8817                 u32 mii_regval;
8818
8819                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8820                         break;                  /* We have no PHY */
8821
8822                 if (tp->link_config.phy_is_low_power)
8823                         return -EAGAIN;
8824
8825                 spin_lock_bh(&tp->lock);
8826                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8827                 spin_unlock_bh(&tp->lock);
8828
8829                 data->val_out = mii_regval;
8830
8831                 return err;
8832         }
8833
8834         case SIOCSMIIREG:
8835                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8836                         break;                  /* We have no PHY */
8837
8838                 if (!capable(CAP_NET_ADMIN))
8839                         return -EPERM;
8840
8841                 if (tp->link_config.phy_is_low_power)
8842                         return -EAGAIN;
8843
8844                 spin_lock_bh(&tp->lock);
8845                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8846                 spin_unlock_bh(&tp->lock);
8847
8848                 return err;
8849
8850         default:
8851                 /* do nothing */
8852                 break;
8853         }
8854         return -EOPNOTSUPP;
8855 }
8856
8857 #if TG3_VLAN_TAG_USED
8858 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8859 {
8860         struct tg3 *tp = netdev_priv(dev);
8861
8862         if (netif_running(dev))
8863                 tg3_netif_stop(tp);
8864
8865         tg3_full_lock(tp, 0);
8866
8867         tp->vlgrp = grp;
8868
8869         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8870         __tg3_set_rx_mode(dev);
8871
8872         tg3_full_unlock(tp);
8873
8874         if (netif_running(dev))
8875                 tg3_netif_start(tp);
8876 }
8877
8878 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8879 {
8880         struct tg3 *tp = netdev_priv(dev);
8881
8882         if (netif_running(dev))
8883                 tg3_netif_stop(tp);
8884
8885         tg3_full_lock(tp, 0);
8886         if (tp->vlgrp)
8887                 tp->vlgrp->vlan_devices[vid] = NULL;
8888         tg3_full_unlock(tp);
8889
8890         if (netif_running(dev))
8891                 tg3_netif_start(tp);
8892 }
8893 #endif
8894
8895 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8896 {
8897         struct tg3 *tp = netdev_priv(dev);
8898
8899         memcpy(ec, &tp->coal, sizeof(*ec));
8900         return 0;
8901 }
8902
/* ethtool set_coalesce: validate and apply interrupt-coalescing settings.
 * On 5705+ chips the irq-tick and stats-block limits stay zero, so any
 * non-zero request for those fields is rejected with -EINVAL.  Only the
 * fields copied below are honored; all other ethtool_coalesce members
 * are ignored.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	/* Range-check every parameter we honor. */
	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Push the new values to the chip only if it is up. */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8956
/* ethtool method table for the tg3 driver; hooked up to the netdevice
 * at probe time.  TSO entries are compiled in only when the kernel
 * provides NETIF_F_TSO (see TG3_TSO_SUPPORT above).
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8996
8997 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8998 {
8999         u32 cursize, val, magic;
9000
9001         tp->nvram_size = EEPROM_CHIP_SIZE;
9002
9003         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9004                 return;
9005
9006         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9007                 return;
9008
9009         /*
9010          * Size the chip by reading offsets at increasing powers of two.
9011          * When we encounter our validation signature, we know the addressing
9012          * has wrapped around, and thus have our chip size.
9013          */
9014         cursize = 0x10;
9015
9016         while (cursize < tp->nvram_size) {
9017                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9018                         return;
9019
9020                 if (val == magic)
9021                         break;
9022
9023                 cursize <<= 1;
9024         }
9025
9026         tp->nvram_size = cursize;
9027 }
9028                 
9029 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9030 {
9031         u32 val;
9032
9033         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9034                 return;
9035
9036         /* Selfboot format */
9037         if (val != TG3_EEPROM_MAGIC) {
9038                 tg3_get_eeprom_size(tp);
9039                 return;
9040         }
9041
9042         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9043                 if (val != 0) {
9044                         tp->nvram_size = (val >> 16) * 1024;
9045                         return;
9046                 }
9047         }
9048         tp->nvram_size = 0x20000;
9049 }
9050
/* Decode NVRAM_CFG1 for chips handled by the generic path (5750/5780
 * class and older): detect flash vs. EEPROM, the part vendor, the page
 * size, and whether the part is buffered.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear compat-bypass for EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Vendor field tells us the part type and page geometry. */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Older chips: assume Atmel buffered flash. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9103
/* Decode NVRAM_CFG1 for 5752-family chips: detect TPM protection, the
 * part vendor/type, and (for flash parts) the page size; EEPROM parts
 * get the chip size as their page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash part: page size is encoded in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9164
/* Decode NVRAM_CFG1 for 5755-family chips: detect TPM protection and
 * set jedecnum, buffering flags and page size per detected part.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM: disable compat bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9205
/* Decode NVRAM_CFG1 for 5787-family chips: set jedecnum, buffering
 * flags and page size per detected part.  Unlike 5752/5755 there is no
 * TPM-protection bit check here.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM: disable compat bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9243
9244 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9245 static void __devinit tg3_nvram_init(struct tg3 *tp)
9246 {
9247         int j;
9248
9249         tw32_f(GRC_EEPROM_ADDR,
9250              (EEPROM_ADDR_FSM_RESET |
9251               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9252                EEPROM_ADDR_CLKPERD_SHIFT)));
9253
9254         /* XXX schedule_timeout() ... */
9255         for (j = 0; j < 100; j++)
9256                 udelay(10);
9257
9258         /* Enable seeprom accesses. */
9259         tw32_f(GRC_LOCAL_CTRL,
9260              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9261         udelay(100);
9262
9263         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9264             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9265                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9266
9267                 if (tg3_nvram_lock(tp)) {
9268                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9269                                "tg3_nvram_init failed.\n", tp->dev->name);
9270                         return;
9271                 }
9272                 tg3_enable_nvram_access(tp);
9273
9274                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9275                         tg3_get_5752_nvram_info(tp);
9276                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9277                         tg3_get_5755_nvram_info(tp);
9278                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9279                         tg3_get_5787_nvram_info(tp);
9280                 else
9281                         tg3_get_nvram_info(tp);
9282
9283                 tg3_get_nvram_size(tp);
9284
9285                 tg3_disable_nvram_access(tp);
9286                 tg3_nvram_unlock(tp);
9287
9288         } else {
9289                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9290
9291                 tg3_get_eeprom_size(tp);
9292         }
9293 }
9294
/* Read one 32-bit word from the serial EEPROM via the GRC EEPROM
 * registers.  'offset' must be word aligned and within the address
 * mask.  Returns 0 on success, -EINVAL on a bad offset, or -EBUSY if
 * the transaction does not complete within ~1s of polling.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the other CFG bits while clearing addr/devid/read. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, 100us per iteration. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9328
9329 #define NVRAM_CMD_TIMEOUT 10000
9330
9331 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9332 {
9333         int i;
9334
9335         tw32(NVRAM_CMD, nvram_cmd);
9336         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9337                 udelay(10);
9338                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9339                         udelay(10);
9340                         break;
9341                 }
9342         }
9343         if (i == NVRAM_CMD_TIMEOUT) {
9344                 return -EBUSY;
9345         }
9346         return 0;
9347 }
9348
9349 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9350 {
9351         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9352             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9353             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9354             (tp->nvram_jedecnum == JEDEC_ATMEL))
9355
9356                 addr = ((addr / tp->nvram_pagesize) <<
9357                         ATMEL_AT45DB0X1B_PAGE_POS) +
9358                        (addr % tp->nvram_pagesize);
9359
9360         return addr;
9361 }
9362
9363 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9364 {
9365         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9366             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9367             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9368             (tp->nvram_jedecnum == JEDEC_ATMEL))
9369
9370                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9371                         tp->nvram_pagesize) +
9372                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9373
9374         return addr;
9375 }
9376
/* Read one 32-bit word from NVRAM at linear byte offset 'offset' into
 * *val (byte-swapped from the chip's RDDATA register).  Falls back to
 * direct EEPROM access when no NVRAM interface is present.  Takes and
 * releases the NVRAM hardware lock around the access.  Returns 0 on
 * success or a negative errno; *val is written only on success.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the part's physical (possibly paged) addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9408
9409 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9410 {
9411         int err;
9412         u32 tmp;
9413
9414         err = tg3_nvram_read(tp, offset, &tmp);
9415         *val = swab32(tmp);
9416         return err;
9417 }
9418
/* Write 'len' bytes from 'buf' to the serial EEPROM starting at
 * 'offset', one 32-bit word at a time via the GRC EEPROM registers.
 * Caller guarantees dword alignment.  Returns 0 on success or -EBUSY
 * if a word write fails to complete within the polling window.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Writing COMPLETE back clears the completion status. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);
		
		/* Poll for completion, 100us per iteration. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9460
/* offset and length are dword aligned */
/* Write to unbuffered flash using read-modify-write at page granularity:
 * read the whole page into a scratch buffer, splice in the new data,
 * erase the page, then rewrite it word by word.  Returns 0 on success
 * or the first error from a read or NVRAM command.
 *
 * NOTE(review): 'buf' is never advanced inside the loop, so a write
 * spanning more than one page would copy the same source bytes into
 * each page — confirm callers never issue multi-page writes here.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;
	
		/* Read the existing page contents into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Splice the caller's data over the cached page. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back one word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always finish with a write-disable command. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9556
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM one 32-bit word at a time, setting
 * NVRAM_CMD_FIRST/LAST around page and transfer boundaries.  Certain ST
 * parts additionally need a write-enable command before each page.
 * Returns 0 on success or the first error from an NVRAM command.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the part's physical addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page boundary or transfer start,
		 * LAST at a page end or transfer end. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			/* ST parts need a write-enable before each page. */
			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9607
9608 /* offset and length are dword aligned */
9609 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9610 {
9611         int ret;
9612
9613         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9614                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9615                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9616                 udelay(40);
9617         }
9618
9619         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9620                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9621         }
9622         else {
9623                 u32 grc_mode;
9624
9625                 ret = tg3_nvram_lock(tp);
9626                 if (ret)
9627                         return ret;
9628
9629                 tg3_enable_nvram_access(tp);
9630                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9631                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9632                         tw32(NVRAM_WRITE1, 0x406);
9633
9634                 grc_mode = tr32(GRC_MODE);
9635                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9636
9637                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9638                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9639
9640                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9641                                 buf);
9642                 }
9643                 else {
9644                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9645                                 buf);
9646                 }
9647
9648                 grc_mode = tr32(GRC_MODE);
9649                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9650
9651                 tg3_disable_nvram_access(tp);
9652                 tg3_nvram_unlock(tp);
9653         }
9654
9655         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9656                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9657                 udelay(40);
9658         }
9659
9660         return ret;
9661 }
9662
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID expected on
 * that board.  Used by tg3_phy_probe() when neither the hardware PHY ID
 * registers nor the eeprom yield a usable ID.
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};
9667
/* Hardcoded board table consulted by lookup_by_subsys().  Entries with
 * phy_id == 0 are treated as SerDes (fiber) boards by tg3_phy_probe().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
        { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
        { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
        { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

        /* DELL boards. */
        { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
        { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
        { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
        { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
        { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

        /* IBM boards. */
        { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9705
9706 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9707 {
9708         int i;
9709
9710         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9711                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9712                      tp->pdev->subsystem_vendor) &&
9713                     (subsys_id_to_phy_id[i].subsys_devid ==
9714                      tp->pdev->subsystem_device))
9715                         return &subsys_id_to_phy_id[i];
9716         }
9717         return NULL;
9718 }
9719
9720 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9721 {
9722         u32 val;
9723         u16 pmcsr;
9724
9725         /* On some early chips the SRAM cannot be accessed in D3hot state,
9726          * so need make sure we're in D0.
9727          */
9728         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9729         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9730         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9731         msleep(1);
9732
9733         /* Make sure register accesses (indirect or otherwise)
9734          * will function correctly.
9735          */
9736         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9737                                tp->misc_host_ctrl);
9738
9739         /* The memory arbiter has to be enabled in order for SRAM accesses
9740          * to succeed.  Normally on powerup the tg3 chip firmware will make
9741          * sure it is enabled, but other entities such as system netboot
9742          * code might disable it.
9743          */
9744         val = tr32(MEMARB_MODE);
9745         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9746
9747         tp->phy_id = PHY_ID_INVALID;
9748         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9749
9750         /* Assume an onboard device by default.  */
9751         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9752
9753         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9754         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9755                 u32 nic_cfg, led_cfg;
9756                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9757                 int eeprom_phy_serdes = 0;
9758
9759                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9760                 tp->nic_sram_data_cfg = nic_cfg;
9761
9762                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9763                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9764                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9765                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9766                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9767                     (ver > 0) && (ver < 0x100))
9768                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9769
9770                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9771                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9772                         eeprom_phy_serdes = 1;
9773
9774                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9775                 if (nic_phy_id != 0) {
9776                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9777                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9778
9779                         eeprom_phy_id  = (id1 >> 16) << 10;
9780                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9781                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9782                 } else
9783                         eeprom_phy_id = 0;
9784
9785                 tp->phy_id = eeprom_phy_id;
9786                 if (eeprom_phy_serdes) {
9787                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9788                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9789                         else
9790                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9791                 }
9792
9793                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9794                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9795                                     SHASTA_EXT_LED_MODE_MASK);
9796                 else
9797                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9798
9799                 switch (led_cfg) {
9800                 default:
9801                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9802                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9803                         break;
9804
9805                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9806                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9807                         break;
9808
9809                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9810                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9811
9812                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9813                          * read on some older 5700/5701 bootcode.
9814                          */
9815                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9816                             ASIC_REV_5700 ||
9817                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9818                             ASIC_REV_5701)
9819                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9820
9821                         break;
9822
9823                 case SHASTA_EXT_LED_SHARED:
9824                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9825                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9826                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9827                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9828                                                  LED_CTRL_MODE_PHY_2);
9829                         break;
9830
9831                 case SHASTA_EXT_LED_MAC:
9832                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9833                         break;
9834
9835                 case SHASTA_EXT_LED_COMBO:
9836                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9837                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9838                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9839                                                  LED_CTRL_MODE_PHY_2);
9840                         break;
9841
9842                 };
9843
9844                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9845                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9846                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9847                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9848
9849                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9850                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9851                 else
9852                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9853
9854                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9855                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9856                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9857                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9858                 }
9859                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9860                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9861
9862                 if (cfg2 & (1 << 17))
9863                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9864
9865                 /* serdes signal pre-emphasis in register 0x590 set by */
9866                 /* bootcode if bit 18 is set */
9867                 if (cfg2 & (1 << 18))
9868                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9869         }
9870 }
9871
/* Determine which PHY is attached and record its ID and serdes status
 * in *tp.  Tries, in order: the hardware PHY ID registers (unless ASF
 * firmware owns the PHY), the eeprom-provided ID from
 * tg3_get_eeprom_hw_cfg(), and finally the hardcoded subsystem-ID
 * table.  Also brings copper PHYs to a sane autoneg state.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to either the hard-coded table based PHY_ID and failing
                 * that the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                /* Pack the two MII ID registers into the driver's
                 * internal PHY ID format (same layout as used in
                 * tg3_get_eeprom_hw_cfg()). */
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
        }

        if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == PHY_ID_BCM8002)
                        tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                else
                        tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
        } else {
                if (tp->phy_id != PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = lookup_by_subsys(tp);
                        if (!p)
                                return -ENODEV;

                        /* A table phy_id of 0 denotes a SerDes board. */
                        tp->phy_id = p->phy_id;
                        if (!tp->phy_id ||
                            tp->phy_id == PHY_ID_BCM8002)
                                tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                u32 bmsr, adv_reg, tg3_ctrl;

                /* BMSR's link bit is latched-low per MII convention;
                 * read twice so the second read shows current state.
                 * If link is already up, skip the PHY reset. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_100HALF | ADVERTISE_100FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                tg3_ctrl = 0;
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                        tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
                                    MII_TG3_CTRL_ADV_1000_FULL);
                        /* Early 5701 steppings must be forced to
                         * master mode. */
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
                }

                if (!tg3_copper_is_advertising_all(tp)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);

                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                                tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
                tg3_phy_set_wirespeed(tp);

                /* NOTE(review): advertisement registers are written
                 * again unconditionally here -- presumably to cover the
                 * case where they were already advertising all; verify. */
                tg3_writephy(tp, MII_ADVERTISE, adv_reg);
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                        tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
        }

skip_phy_reset:
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;
        }

        /* NOTE(review): when the block above succeeds (err == 0), this
         * runs the 5401 DSP init a second time.  Looks like a deliberate
         * double-program of the DSP, but confirm before changing. */
        if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
                err = tg3_init_5401phy_dsp(tp);
        }

        if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                tp->link_config.advertising =
                        (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg |
                         ADVERTISED_FIBRE);
        if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                tp->link_config.advertising &=
                        ~(ADVERTISED_1000baseT_Half |
                          ADVERTISED_1000baseT_Full);

        return err;
}
9994
9995 static void __devinit tg3_read_partno(struct tg3 *tp)
9996 {
9997         unsigned char vpd_data[256];
9998         int i;
9999         u32 magic;
10000
10001         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10002                 goto out_not_found;
10003
10004         if (magic == TG3_EEPROM_MAGIC) {
10005                 for (i = 0; i < 256; i += 4) {
10006                         u32 tmp;
10007
10008                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10009                                 goto out_not_found;
10010
10011                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10012                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10013                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10014                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10015                 }
10016         } else {
10017                 int vpd_cap;
10018
10019                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10020                 for (i = 0; i < 256; i += 4) {
10021                         u32 tmp, j = 0;
10022                         u16 tmp16;
10023
10024                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10025                                               i);
10026                         while (j++ < 100) {
10027                                 pci_read_config_word(tp->pdev, vpd_cap +
10028                                                      PCI_VPD_ADDR, &tmp16);
10029                                 if (tmp16 & 0x8000)
10030                                         break;
10031                                 msleep(1);
10032                         }
10033                         if (!(tmp16 & 0x8000))
10034                                 goto out_not_found;
10035
10036                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10037                                               &tmp);
10038                         tmp = cpu_to_le32(tmp);
10039                         memcpy(&vpd_data[i], &tmp, 4);
10040                 }
10041         }
10042
10043         /* Now parse and find the part number. */
10044         for (i = 0; i < 256; ) {
10045                 unsigned char val = vpd_data[i];
10046                 int block_end;
10047
10048                 if (val == 0x82 || val == 0x91) {
10049                         i = (i + 3 +
10050                              (vpd_data[i + 1] +
10051                               (vpd_data[i + 2] << 8)));
10052                         continue;
10053                 }
10054
10055                 if (val != 0x90)
10056                         goto out_not_found;
10057
10058                 block_end = (i + 3 +
10059                              (vpd_data[i + 1] +
10060                               (vpd_data[i + 2] << 8)));
10061                 i += 3;
10062                 while (i < block_end) {
10063                         if (vpd_data[i + 0] == 'P' &&
10064                             vpd_data[i + 1] == 'N') {
10065                                 int partno_len = vpd_data[i + 2];
10066
10067                                 if (partno_len > 24)
10068                                         goto out_not_found;
10069
10070                                 memcpy(tp->board_part_number,
10071                                        &vpd_data[i + 3],
10072                                        partno_len);
10073
10074                                 /* Success. */
10075                                 return;
10076                         }
10077                 }
10078
10079                 /* Part number not found. */
10080                 goto out_not_found;
10081         }
10082
10083 out_not_found:
10084         strcpy(tp->board_part_number, "none");
10085 }
10086
/* Read the bootcode firmware version string (16 bytes) out of NVRAM
 * into tp->fw_ver.  Returns silently, leaving tp->fw_ver untouched,
 * if the eeprom magic is absent or any NVRAM read fails.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
        u32 val, offset, start;

        if (tg3_nvram_read_swab(tp, 0, &val))
                return;

        if (val != TG3_EEPROM_MAGIC)
                return;

        /* NOTE(review): 0x4 appears to hold the image start address and
         * 0xc a directory entry offset -- confirm against the Broadcom
         * NVRAM image layout before relying on this. */
        if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
            tg3_nvram_read_swab(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);
        if (tg3_nvram_read_swab(tp, offset, &val))
                return;

        /* Only proceed for entries whose top bits read 0x0c (presumably
         * the bootcode entry type -- TODO confirm). */
        if ((val & 0xfc000000) == 0x0c000000) {
                u32 ver_offset, addr;
                int i;

                if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
                    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
                        return;

                if (val != 0)
                        return;

                /* Copy 16 bytes of version string, a dword at a time,
                 * fixing byte order as we go. */
                addr = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        if (tg3_nvram_read(tp, addr + i, &val))
                                return;

                        val = cpu_to_le32(val);
                        memcpy(tp->fw_ver + i, &val, 4);
                }
        }
}
10126
10127 static int __devinit tg3_get_invariants(struct tg3 *tp)
10128 {
10129         static struct pci_device_id write_reorder_chipsets[] = {
10130                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10131                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10132                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10133                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10134                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10135                              PCI_DEVICE_ID_VIA_8385_0) },
10136                 { },
10137         };
10138         u32 misc_ctrl_reg;
10139         u32 cacheline_sz_reg;
10140         u32 pci_state_reg, grc_misc_cfg;
10141         u32 val;
10142         u16 pci_cmd;
10143         int err;
10144
10145         /* Force memory write invalidate off.  If we leave it on,
10146          * then on 5700_BX chips we have to enable a workaround.
10147          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10148          * to match the cacheline size.  The Broadcom driver have this
10149          * workaround but turns MWI off all the times so never uses
10150          * it.  This seems to suggest that the workaround is insufficient.
10151          */
10152         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10153         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10154         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10155
10156         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10157          * has the register indirect write enable bit set before
10158          * we try to access any of the MMIO registers.  It is also
10159          * critical that the PCI-X hw workaround situation is decided
10160          * before that as well.
10161          */
10162         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10163                               &misc_ctrl_reg);
10164
10165         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10166                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10167
10168         /* Wrong chip ID in 5752 A0. This code can be removed later
10169          * as A0 is not in production.
10170          */
10171         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10172                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10173
10174         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10175          * we need to disable memory and use config. cycles
10176          * only to access all registers. The 5702/03 chips
10177          * can mistakenly decode the special cycles from the
10178          * ICH chipsets as memory write cycles, causing corruption
10179          * of register and memory space. Only certain ICH bridges
10180          * will drive special cycles with non-zero data during the
10181          * address phase which can fall within the 5703's address
10182          * range. This is not an ICH bug as the PCI spec allows
10183          * non-zero address during special cycles. However, only
10184          * these ICH bridges are known to drive non-zero addresses
10185          * during special cycles.
10186          *
10187          * Since special cycles do not cross PCI bridges, we only
10188          * enable this workaround if the 5703 is on the secondary
10189          * bus of these ICH bridges.
10190          */
10191         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10192             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10193                 static struct tg3_dev_id {
10194                         u32     vendor;
10195                         u32     device;
10196                         u32     rev;
10197                 } ich_chipsets[] = {
10198                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10199                           PCI_ANY_ID },
10200                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10201                           PCI_ANY_ID },
10202                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10203                           0xa },
10204                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10205                           PCI_ANY_ID },
10206                         { },
10207                 };
10208                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10209                 struct pci_dev *bridge = NULL;
10210
10211                 while (pci_id->vendor != 0) {
10212                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10213                                                 bridge);
10214                         if (!bridge) {
10215                                 pci_id++;
10216                                 continue;
10217                         }
10218                         if (pci_id->rev != PCI_ANY_ID) {
10219                                 u8 rev;
10220
10221                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10222                                                      &rev);
10223                                 if (rev > pci_id->rev)
10224                                         continue;
10225                         }
10226                         if (bridge->subordinate &&
10227                             (bridge->subordinate->number ==
10228                              tp->pdev->bus->number)) {
10229
10230                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10231                                 pci_dev_put(bridge);
10232                                 break;
10233                         }
10234                 }
10235         }
10236
10237         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10238          * DMA addresses > 40-bit. This bridge may have other additional
10239          * 57xx devices behind it in some 4-port NIC designs for example.
10240          * Any tg3 device found behind the bridge will also need the 40-bit
10241          * DMA workaround.
10242          */
10243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10244             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10245                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10246                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10247                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10248         }
10249         else {
10250                 struct pci_dev *bridge = NULL;
10251
10252                 do {
10253                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10254                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10255                                                 bridge);
10256                         if (bridge && bridge->subordinate &&
10257                             (bridge->subordinate->number <=
10258                              tp->pdev->bus->number) &&
10259                             (bridge->subordinate->subordinate >=
10260                              tp->pdev->bus->number)) {
10261                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10262                                 pci_dev_put(bridge);
10263                                 break;
10264                         }
10265                 } while (bridge);
10266         }
10267
10268         /* Initialize misc host control in PCI block. */
10269         tp->misc_host_ctrl |= (misc_ctrl_reg &
10270                                MISC_HOST_CTRL_CHIPREV);
10271         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10272                                tp->misc_host_ctrl);
10273
10274         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10275                               &cacheline_sz_reg);
10276
10277         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10278         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10279         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10280         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10281
10282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10283             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10284             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10285             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10286             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10287                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10288
10289         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10290             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10291                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10292
10293         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10294                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10295                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10296                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10297                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10298                 } else {
10299                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10300                                           TG3_FLG2_HW_TSO_1_BUG;
10301                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10302                                 ASIC_REV_5750 &&
10303                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10304                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10305                 }
10306         }
10307
10308         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10309             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10310             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10311             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10312             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10313                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10314
10315         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10316                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10317
10318         /* If we have an AMD 762 or VIA K8T800 chipset, write
10319          * reordering to the mailbox registers done by the host
10320          * controller can cause major troubles.  We read back from
10321          * every mailbox register write to force the writes to be
10322          * posted to the chip in order.
10323          */
10324         if (pci_dev_present(write_reorder_chipsets) &&
10325             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10326                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10327
10328         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10329             tp->pci_lat_timer < 64) {
10330                 tp->pci_lat_timer = 64;
10331
10332                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10333                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10334                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10335                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10336
10337                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10338                                        cacheline_sz_reg);
10339         }
10340
10341         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10342                               &pci_state_reg);
10343
10344         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10345                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10346
10347                 /* If this is a 5700 BX chipset, and we are in PCI-X
10348                  * mode, enable register write workaround.
10349                  *
10350                  * The workaround is to use indirect register accesses
10351                  * for all chip writes not to mailbox registers.
10352                  */
10353                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10354                         u32 pm_reg;
10355                         u16 pci_cmd;
10356
10357                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10358
10359                         /* The chip can have it's power management PCI config
10360                          * space registers clobbered due to this bug.
10361                          * So explicitly force the chip into D0 here.
10362                          */
10363                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10364                                               &pm_reg);
10365                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10366                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10367                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10368                                                pm_reg);
10369
10370                         /* Also, force SERR#/PERR# in PCI command. */
10371                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10372                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10373                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10374                 }
10375         }
10376
10377         /* 5700 BX chips need to have their TX producer index mailboxes
10378          * written twice to workaround a bug.
10379          */
10380         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10381                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10382
10383         /* Back to back register writes can cause problems on this chip,
10384          * the workaround is to read back all reg writes except those to
10385          * mailbox regs.  See tg3_write_indirect_reg32().
10386          *
10387          * PCI Express 5750_A0 rev chips need this workaround too.
10388          */
10389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10390             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10391              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10392                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10393
10394         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10395                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10396         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10397                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10398
10399         /* Chip-specific fixup from Broadcom driver */
10400         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10401             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10402                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10403                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10404         }
10405
10406         /* Default fast path register access methods */
10407         tp->read32 = tg3_read32;
10408         tp->write32 = tg3_write32;
10409         tp->read32_mbox = tg3_read32;
10410         tp->write32_mbox = tg3_write32;
10411         tp->write32_tx_mbox = tg3_write32;
10412         tp->write32_rx_mbox = tg3_write32;
10413
10414         /* Various workaround register access methods */
10415         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10416                 tp->write32 = tg3_write_indirect_reg32;
10417         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10418                 tp->write32 = tg3_write_flush_reg32;
10419
10420         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10421             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10422                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10423                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10424                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10425         }
10426
10427         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10428                 tp->read32 = tg3_read_indirect_reg32;
10429                 tp->write32 = tg3_write_indirect_reg32;
10430                 tp->read32_mbox = tg3_read_indirect_mbox;
10431                 tp->write32_mbox = tg3_write_indirect_mbox;
10432                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10433                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10434
10435                 iounmap(tp->regs);
10436                 tp->regs = NULL;
10437
10438                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10439                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10440                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10441         }
10442
10443         if (tp->write32 == tg3_write_indirect_reg32 ||
10444             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10445              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10446               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10447                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10448
10449         /* Get eeprom hw config before calling tg3_set_power_state().
10450          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10451          * determined before calling tg3_set_power_state() so that
10452          * we know whether or not to switch out of Vaux power.
10453          * When the flag is set, it means that GPIO1 is used for eeprom
10454          * write protect and also implies that it is a LOM where GPIOs
10455          * are not used to switch power.
10456          */ 
10457         tg3_get_eeprom_hw_cfg(tp);
10458
10459         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10460          * GPIO1 driven high will bring 5700's external PHY out of reset.
10461          * It is also used as eeprom write protect on LOMs.
10462          */
10463         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10464         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10465             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10466                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10467                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10468         /* Unused GPIO3 must be driven as output on 5752 because there
10469          * are no pull-up resistors on unused GPIO pins.
10470          */
10471         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10472                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10473
10474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10475                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10476
10477         /* Force the chip into D0. */
10478         err = tg3_set_power_state(tp, PCI_D0);
10479         if (err) {
10480                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10481                        pci_name(tp->pdev));
10482                 return err;
10483         }
10484
10485         /* 5700 B0 chips do not support checksumming correctly due
10486          * to hardware bugs.
10487          */
10488         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10489                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10490
10491         /* Derive initial jumbo mode from MTU assigned in
10492          * ether_setup() via the alloc_etherdev() call
10493          */
10494         if (tp->dev->mtu > ETH_DATA_LEN &&
10495             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10496                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10497
10498         /* Determine WakeOnLan speed to use. */
10499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10500             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10501             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10502             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10503                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10504         } else {
10505                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10506         }
10507
10508         /* A few boards don't want Ethernet@WireSpeed phy feature */
10509         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10510             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10511              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10512              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10513             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10514                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10515
10516         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10517             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10518                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10519         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10520                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10521
10522         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10523                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10524                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10525                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10526                 else
10527                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10528         }
10529
10530         tp->coalesce_mode = 0;
10531         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10532             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10533                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10534
10535         /* Initialize MAC MI mode, polling disabled. */
10536         tw32_f(MAC_MI_MODE, tp->mi_mode);
10537         udelay(80);
10538
10539         /* Initialize data/descriptor byte/word swapping. */
10540         val = tr32(GRC_MODE);
10541         val &= GRC_MODE_HOST_STACKUP;
10542         tw32(GRC_MODE, val | tp->grc_mode);
10543
10544         tg3_switch_clocks(tp);
10545
10546         /* Clear this out for sanity. */
10547         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10548
10549         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10550                               &pci_state_reg);
10551         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10552             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10553                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10554
10555                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10556                     chiprevid == CHIPREV_ID_5701_B0 ||
10557                     chiprevid == CHIPREV_ID_5701_B2 ||
10558                     chiprevid == CHIPREV_ID_5701_B5) {
10559                         void __iomem *sram_base;
10560
10561                         /* Write some dummy words into the SRAM status block
10562                          * area, see if it reads back correctly.  If the return
10563                          * value is bad, force enable the PCIX workaround.
10564                          */
10565                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10566
10567                         writel(0x00000000, sram_base);
10568                         writel(0x00000000, sram_base + 4);
10569                         writel(0xffffffff, sram_base + 4);
10570                         if (readl(sram_base) != 0x00000000)
10571                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10572                 }
10573         }
10574
10575         udelay(50);
10576         tg3_nvram_init(tp);
10577
10578         grc_misc_cfg = tr32(GRC_MISC_CFG);
10579         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10580
10581         /* Broadcom's driver says that CIOBE multisplit has a bug */
10582 #if 0
10583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10584             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10585                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10586                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10587         }
10588 #endif
10589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10590             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10591              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10592                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10593
10594         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10595             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10596                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10597         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10598                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10599                                       HOSTCC_MODE_CLRTICK_TXBD);
10600
10601                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10602                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10603                                        tp->misc_host_ctrl);
10604         }
10605
10606         /* these are limited to 10/100 only */
10607         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10608              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10609             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10610              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10611              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10612               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10613               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10614             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10615              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10616               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10617                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10618
10619         err = tg3_phy_probe(tp);
10620         if (err) {
10621                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10622                        pci_name(tp->pdev), err);
10623                 /* ... but do not return immediately ... */
10624         }
10625
10626         tg3_read_partno(tp);
10627         tg3_read_fw_ver(tp);
10628
10629         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10630                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10631         } else {
10632                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10633                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10634                 else
10635                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10636         }
10637
10638         /* 5700 {AX,BX} chips have a broken status block link
10639          * change bit implementation, so we must use the
10640          * status register in those cases.
10641          */
10642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10643                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10644         else
10645                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10646
10647         /* The led_ctrl is set during tg3_phy_probe, here we might
10648          * have to force the link status polling mechanism based
10649          * upon subsystem IDs.
10650          */
10651         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10652             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10653                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10654                                   TG3_FLAG_USE_LINKCHG_REG);
10655         }
10656
10657         /* For all SERDES we poll the MAC status register. */
10658         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10659                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10660         else
10661                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10662
10663         /* All chips before 5787 can get confused if TX buffers
10664          * straddle the 4GB address boundary in some cases.
10665          */
10666         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10667             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10668                 tp->dev->hard_start_xmit = tg3_start_xmit;
10669         else
10670                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10671
10672         tp->rx_offset = 2;
10673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10674             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10675                 tp->rx_offset = 0;
10676
10677         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10678
10679         /* Increment the rx prod index on the rx std ring by at most
10680          * 8 for these chips to workaround hw errata.
10681          */
10682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10684             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10685                 tp->rx_std_max_post = 8;
10686
10687         /* By default, disable wake-on-lan.  User can change this
10688          * using ETHTOOL_SWOL.
10689          */
10690         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10691
10692         return err;
10693 }
10694
10695 #ifdef CONFIG_SPARC64
10696 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10697 {
10698         struct net_device *dev = tp->dev;
10699         struct pci_dev *pdev = tp->pdev;
10700         struct pcidev_cookie *pcp = pdev->sysdata;
10701
10702         if (pcp != NULL) {
10703                 unsigned char *addr;
10704                 int len;
10705
10706                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10707                                         &len);
10708                 if (addr && len == 6) {
10709                         memcpy(dev->dev_addr, addr, 6);
10710                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10711                         return 0;
10712                 }
10713         }
10714         return -ENODEV;
10715 }
10716
10717 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10718 {
10719         struct net_device *dev = tp->dev;
10720
10721         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10722         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10723         return 0;
10724 }
10725 #endif
10726
/* Determine the device's MAC address, trying sources in order of
 * preference: Open Firmware property (SPARC), the SRAM mailbox filled
 * in by bootcode, NVRAM, and finally the chip's MAC address registers.
 * On success the address is copied to dev->perm_addr and 0 is
 * returned; -EINVAL if no valid (non-zero, non-multicast) address was
 * found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC64
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        /* Default NVRAM offset of the MAC address. */
        mac_offset = 0x7c;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                /* Dual-MAC parts keep the second function's address at
                 * a different NVRAM offset.
                 */
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* If the NVRAM lock cannot be taken, reset the NVRAM
                 * command engine instead of unlocking.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        }

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* 0x484b in the upper half is the signature bootcode leaves
         * when the mailbox contents are valid.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                /* NOTE(review): the byte order used here is the
                 * reverse of the SRAM mailbox layout above; presumably
                 * this matches the NVRAM storage format — verify
                 * against the NVRAM map before changing.
                 */
                if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
                        dev->dev_addr[0] = ((hi >> 16) & 0xff);
                        dev->dev_addr[1] = ((hi >> 24) & 0xff);
                        dev->dev_addr[2] = ((lo >>  0) & 0xff);
                        dev->dev_addr[3] = ((lo >>  8) & 0xff);
                        dev->dev_addr[4] = ((lo >> 16) & 0xff);
                        dev->dev_addr[5] = ((lo >> 24) & 0xff);
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
                /* Fall back to the system IDPROM address on SPARC. */
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}
10799
10800 #define BOUNDARY_SINGLE_CACHELINE       1
10801 #define BOUNDARY_MULTI_CACHELINE        2
10802
10803 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10804 {
10805         int cacheline_size;
10806         u8 byte;
10807         int goal;
10808
10809         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10810         if (byte == 0)
10811                 cacheline_size = 1024;
10812         else
10813                 cacheline_size = (int) byte * 4;
10814
10815         /* On 5703 and later chips, the boundary bits have no
10816          * effect.
10817          */
10818         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10819             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10820             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10821                 goto out;
10822
10823 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10824         goal = BOUNDARY_MULTI_CACHELINE;
10825 #else
10826 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10827         goal = BOUNDARY_SINGLE_CACHELINE;
10828 #else
10829         goal = 0;
10830 #endif
10831 #endif
10832
10833         if (!goal)
10834                 goto out;
10835
10836         /* PCI controllers on most RISC systems tend to disconnect
10837          * when a device tries to burst across a cache-line boundary.
10838          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10839          *
10840          * Unfortunately, for PCI-E there are only limited
10841          * write-side controls for this, and thus for reads
10842          * we will still get the disconnects.  We'll also waste
10843          * these PCI cycles for both read and write for chips
10844          * other than 5700 and 5701 which do not implement the
10845          * boundary bits.
10846          */
10847         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10848             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10849                 switch (cacheline_size) {
10850                 case 16:
10851                 case 32:
10852                 case 64:
10853                 case 128:
10854                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10855                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10856                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10857                         } else {
10858                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10859                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10860                         }
10861                         break;
10862
10863                 case 256:
10864                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10865                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10866                         break;
10867
10868                 default:
10869                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10870                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10871                         break;
10872                 };
10873         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10874                 switch (cacheline_size) {
10875                 case 16:
10876                 case 32:
10877                 case 64:
10878                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10879                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10880                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10881                                 break;
10882                         }
10883                         /* fallthrough */
10884                 case 128:
10885                 default:
10886                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10887                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10888                         break;
10889                 };
10890         } else {
10891                 switch (cacheline_size) {
10892                 case 16:
10893                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10894                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10895                                         DMA_RWCTRL_WRITE_BNDRY_16);
10896                                 break;
10897                         }
10898                         /* fallthrough */
10899                 case 32:
10900                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10901                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10902                                         DMA_RWCTRL_WRITE_BNDRY_32);
10903                                 break;
10904                         }
10905                         /* fallthrough */
10906                 case 64:
10907                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10908                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10909                                         DMA_RWCTRL_WRITE_BNDRY_64);
10910                                 break;
10911                         }
10912                         /* fallthrough */
10913                 case 128:
10914                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10915                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10916                                         DMA_RWCTRL_WRITE_BNDRY_128);
10917                                 break;
10918                         }
10919                         /* fallthrough */
10920                 case 256:
10921                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10922                                 DMA_RWCTRL_WRITE_BNDRY_256);
10923                         break;
10924                 case 512:
10925                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10926                                 DMA_RWCTRL_WRITE_BNDRY_512);
10927                         break;
10928                 case 1024:
10929                 default:
10930                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10931                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10932                         break;
10933                 };
10934         }
10935
10936 out:
10937         return val;
10938 }
10939
/* Exercise the on-chip DMA engine by moving one buffer of @size bytes
 * between the host buffer (@buf / @buf_dma) and NIC-internal SRAM.
 * @to_device != 0 runs a host->card read-DMA test, otherwise a
 * card->host write-DMA test.
 *
 * Returns 0 when the completion FIFO reports the test descriptor was
 * consumed within the polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and status registers so the
	 * polling loop below observes only this test's completion.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build an internal buffer descriptor pointing at the host
	 * buffer; 0x2100 is the NIC-side mbuf address used for the test.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * indirect PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor's SRAM
	 * address on the appropriate DMA work FIFO.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll (up to 40 * 100us = 4ms) for the completion FIFO to echo
	 * back our descriptor address.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11020
11021 #define TEST_BUFFER_SIZE        0x2000
11022
/* Tune and validate the chip's host DMA engine.  First computes a
 * chip- and bus-specific value for tp->dma_rwctrl; then, on 5700/5701
 * parts only, runs a real DMA write/read loopback and, if corruption
 * is detected, restricts the write boundary to 16 bytes to work around
 * the 5700/5701 write-DMA bug.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA corruption persists even with the
 * restricted boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write (0x7) and read (0x6) command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	/* Fold in the DMA boundary bits appropriate for this chip. */
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-type specific watermark bits (magic values per Broadcom). */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (minimum DMA size bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 have the write-DMA bug; everything else is
	 * done after computing dma_rwctrl above.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write the pattern to the card, read it back, verify.
	 * On the first corruption, retry once with a 16-byte write
	 * boundary; if it corrupts again, give up with -ENODEV.
	 */
	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known ascending pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: restrict the write
				 * boundary to 16 bytes and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11203
11204 static void __devinit tg3_init_link_config(struct tg3 *tp)
11205 {
11206         tp->link_config.advertising =
11207                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11208                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11209                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11210                  ADVERTISED_Autoneg | ADVERTISED_MII);
11211         tp->link_config.speed = SPEED_INVALID;
11212         tp->link_config.duplex = DUPLEX_INVALID;
11213         tp->link_config.autoneg = AUTONEG_ENABLE;
11214         tp->link_config.active_speed = SPEED_INVALID;
11215         tp->link_config.active_duplex = DUPLEX_INVALID;
11216         tp->link_config.phy_is_low_power = 0;
11217         tp->link_config.orig_speed = SPEED_INVALID;
11218         tp->link_config.orig_duplex = DUPLEX_INVALID;
11219         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11220 }
11221
11222 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11223 {
11224         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11225                 tp->bufmgr_config.mbuf_read_dma_low_water =
11226                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11227                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11228                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11229                 tp->bufmgr_config.mbuf_high_water =
11230                         DEFAULT_MB_HIGH_WATER_5705;
11231
11232                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11233                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11234                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11235                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11236                 tp->bufmgr_config.mbuf_high_water_jumbo =
11237                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11238         } else {
11239                 tp->bufmgr_config.mbuf_read_dma_low_water =
11240                         DEFAULT_MB_RDMA_LOW_WATER;
11241                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11242                         DEFAULT_MB_MACRX_LOW_WATER;
11243                 tp->bufmgr_config.mbuf_high_water =
11244                         DEFAULT_MB_HIGH_WATER;
11245
11246                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11247                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11248                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11249                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11250                 tp->bufmgr_config.mbuf_high_water_jumbo =
11251                         DEFAULT_MB_HIGH_WATER_JUMBO;
11252         }
11253
11254         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11255         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11256 }
11257
11258 static char * __devinit tg3_phy_string(struct tg3 *tp)
11259 {
11260         switch (tp->phy_id & PHY_ID_MASK) {
11261         case PHY_ID_BCM5400:    return "5400";
11262         case PHY_ID_BCM5401:    return "5401";
11263         case PHY_ID_BCM5411:    return "5411";
11264         case PHY_ID_BCM5701:    return "5701";
11265         case PHY_ID_BCM5703:    return "5703";
11266         case PHY_ID_BCM5704:    return "5704";
11267         case PHY_ID_BCM5705:    return "5705";
11268         case PHY_ID_BCM5750:    return "5750";
11269         case PHY_ID_BCM5752:    return "5752";
11270         case PHY_ID_BCM5714:    return "5714";
11271         case PHY_ID_BCM5780:    return "5780";
11272         case PHY_ID_BCM5755:    return "5755";
11273         case PHY_ID_BCM5787:    return "5787";
11274         case PHY_ID_BCM8002:    return "8002/serdes";
11275         case 0:                 return "serdes";
11276         default:                return "unknown";
11277         };
11278 }
11279
11280 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11281 {
11282         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11283                 strcpy(str, "PCI Express");
11284                 return str;
11285         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11286                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11287
11288                 strcpy(str, "PCIX:");
11289
11290                 if ((clock_ctrl == 7) ||
11291                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11292                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11293                         strcat(str, "133MHz");
11294                 else if (clock_ctrl == 0)
11295                         strcat(str, "33MHz");
11296                 else if (clock_ctrl == 2)
11297                         strcat(str, "50MHz");
11298                 else if (clock_ctrl == 4)
11299                         strcat(str, "66MHz");
11300                 else if (clock_ctrl == 6)
11301                         strcat(str, "100MHz");
11302         } else {
11303                 strcpy(str, "PCI:");
11304                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11305                         strcat(str, "66MHz");
11306                 else
11307                         strcat(str, "33MHz");
11308         }
11309         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11310                 strcat(str, ":32-bit");
11311         else
11312                 strcat(str, ":64-bit");
11313         return str;
11314 }
11315
/* Locate the other PCI function of a dual-port device: scan all eight
 * functions in our slot for a pci_dev that is not ourselves.  Falls
 * back to tp->pdev itself when the device is configured single-port.
 *
 * NOTE(review): when pci_get_slot() returns tp->pdev itself, the loop
 * still calls pci_dev_put() on it before continuing — presumably
 * balanced by the reference pci_get_slot() just took; verify against
 * pci_get_slot()/pci_dev_put() refcounting rules.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr = slot base: device number with the function bits cleared. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so this is safe for
		 * empty function slots.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11343
11344 static void __devinit tg3_init_coal(struct tg3 *tp)
11345 {
11346         struct ethtool_coalesce *ec = &tp->coal;
11347
11348         memset(ec, 0, sizeof(*ec));
11349         ec->cmd = ETHTOOL_GCOALESCE;
11350         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11351         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11352         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11353         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11354         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11355         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11356         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11357         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11358         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11359
11360         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11361                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11362                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11363                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11364                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11365                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11366         }
11367
11368         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11369                 ec->rx_coalesce_usecs_irq = 0;
11370                 ec->tx_coalesce_usecs_irq = 0;
11371                 ec->stats_block_coalesce_usecs = 0;
11372         }
11373 }
11374
/* PCI probe entry point.  Enables and maps the device, allocates the
 * net_device, discovers chip capabilities, configures the DMA masks,
 * resets any firmware-left DMA state, runs the DMA engine test, and
 * finally registers the network interface.
 *
 * Returns 0 on success or a negative errno; partially-acquired
 * resources are unwound through the err_out_* labels in reverse order.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver version banner only on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* The chip's registers live in BAR 0; it must be a memory BAR. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	/* Allocate the net_device with struct tg3 as private data. */
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Wire up the private state and default operating modes. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	/* Map the register BAR uncached. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the net_device method table. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Probe chip revision, bus type, and feature flags. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide whether this chip may do TSO: hardware TSO always
	 * can; some older chips / ASF configurations cannot.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705 A1 on a slow bus without TSO: shrink the RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips need a pointer to their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe summary banners. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	/* Carrier stays off until the link state machine brings it up. */
	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11702
11703 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11704 {
11705         struct net_device *dev = pci_get_drvdata(pdev);
11706
11707         if (dev) {
11708                 struct tg3 *tp = netdev_priv(dev);
11709
11710                 flush_scheduled_work();
11711                 unregister_netdev(dev);
11712                 if (tp->regs) {
11713                         iounmap(tp->regs);
11714                         tp->regs = NULL;
11715                 }
11716                 free_netdev(dev);
11717                 pci_release_regions(pdev);
11718                 pci_disable_device(pdev);
11719                 pci_set_drvdata(pdev, NULL);
11720         }
11721 }
11722
/* PCI suspend hook: quiesce the interface and drop the chip into the
 * low-power state chosen for @state.
 *
 * If the interface is down there is nothing to save.  Otherwise the
 * sequence is: flush deferred work, stop the data path and timer,
 * mask interrupts, detach the netdev, halt the chip, then request the
 * power transition.  On a failed transition the hardware is restarted
 * and the interface reattached so the device stays usable; the
 * original error is still returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	/* Make sure no reset task is still queued before we halt. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Mark init incomplete so a wakeup goes through full re-init. */
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the chip back up and
		 * reattach the interface.  Note err is left set so the
		 * failure is propagated even after a successful restart.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
11768
/* PCI resume hook: restore PCI config space, power the chip back to
 * D0, re-initialize the hardware, and restart the data path and timer.
 * Returns 0 on success or a negative error from the power transition
 * or hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	/* Restore the config-space snapshot saved by pci_save_state()
	 * during probe (see tg3_init_one()).
	 */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
11803
/* PCI driver glue: binds the Tigon3 device ID table to the
 * probe/remove and power-management entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11812
11813 static int __init tg3_init(void)
11814 {
11815         return pci_module_init(&tg3_driver);
11816 }
11817
/* Module exit point: unregister the PCI driver; the core invokes
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11822
/* Hook the init/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);