typhoon: Need non-vmalloc memory to DMA firmware to the card.
[safe/jmp/linux-2.6] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * (Module parameter -- see module_param() below.)
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * (Module parameter -- see module_param() below.)
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
65
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes in bytes, for wrap calculations on the byte-offset indices */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Rx buffer size in bytes (large enough for a 1518-byte frame) */
#define PKT_BUF_SZ		1536

/* Driver identity strings and message prefixes */
#define DRV_MODULE_NAME		"typhoon"
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"06/11/09"
#define PFX			DRV_MODULE_NAME ": "
#define ERR_PFX			KERN_ERR PFX
107
108 #include <linux/module.h>
109 #include <linux/kernel.h>
110 #include <linux/string.h>
111 #include <linux/timer.h>
112 #include <linux/errno.h>
113 #include <linux/ioport.h>
114 #include <linux/slab.h>
115 #include <linux/interrupt.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/mm.h>
121 #include <linux/init.h>
122 #include <linux/delay.h>
123 #include <linux/ethtool.h>
124 #include <linux/if_vlan.h>
125 #include <linux/crc32.h>
126 #include <linux/bitops.h>
127 #include <asm/processor.h>
128 #include <asm/io.h>
129 #include <asm/uaccess.h>
130 #include <linux/in6.h>
131 #include <linux/dma-mapping.h>
132 #include <linux/firmware.h>
133
134 #include "typhoon.h"
135
/* Banner printed once at probe/init time */
static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

#define FIRMWARE_NAME		"3com/typhoon.bin"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
			       "the buffer given back to the NIC. Default "
			       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
			   "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The 3XP only supports 32 scatter/gather entries per TSO packet; if the
 * stack could hand us more fragments than that, disable TSO entirely.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* Sanity check: the Tx ring must hold a maximally-fragmented packet */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
161
/* Static description of one card model: marketing name plus a bitmask of
 * the TYPHOON_* capability flags below.
 */
struct typhoon_card_info {
	char *name;
	int capabilities;
};

/* Capability flag bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Indices into typhoon_card_info[] below; also used as driver_data in
 * the PCI device table.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
180
/* directly indexed by enum typhoon_cards, above --
 * entries here MUST stay in the same order as that enum.
 */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
210
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
/* The driver_data field of each entry is an enum typhoon_cards value,
 * indexing typhoon_card_info[] above.
 */
static struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
247
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 *
 * This whole structure is shared with the 3XP via DMA (see the shared /
 * shared_dma fields in struct typhoon), so it is packed and its field
 * layout must not change without matching firmware expectations.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
265
/* Per-slot Rx bookkeeping: the skb posted to the NIC and the DMA
 * address it was mapped at (needed to unmap on completion).
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
270
/* Per-adapter private state (obtained via netdev_priv()).
 * Fields are grouped by access pattern: the Tx hot path, the Irq/Rx hot
 * path, and the slower command/general path each start on their own
 * cache line (____cacheline_aligned) to limit false sharing.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into the shared area */
	u8			awaiting_resp;	/* command response pending flag */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	const char *		name;
	struct typhoon_shared *	shared;		/* host-side view of DMA area */
	dma_addr_t		shared_dma;	/* bus address of the DMA area */
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;
	
	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring 	txHiRing;
};
312
/* Values for the wait_type argument of typhoon_reset(): don't wait at
 * all, busy-wait with udelay(), or poll while allowing the caller to
 * sleep between checks.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
324
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 * (Only needed for MMIO; port IO is not posted.)
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 * The *_TIMEOUT values are iteration counts for TYPHOON_UDELAY-us polls
 * (except the SLEEP reset timeout, which is in jiffies).
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* If the kernel has TSO support, hook it up to the 3XP's TCP
 * segmentation offload; otherwise stub everything out so the Tx
 * path compiles without it.
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO		0
#define skb_tso_size(x)		0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
348
349 static inline void
350 typhoon_inc_index(u32 *index, const int count, const int num_entries)
351 {
352         /* Increment a ring index -- we can use this for all rings execept
353          * the Rx rings, as they use different size descriptors
354          * otherwise, everything is the same size as a cmd_desc
355          */
356         *index += count * sizeof(struct cmd_desc);
357         *index %= num_entries * sizeof(struct cmd_desc);
358 }
359
/* Advance a command ring index by @count descriptors. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
365
/* Advance a response ring index by @count descriptors. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
371
/* Advance the Rx free-buffer ring index by @count descriptors. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
377
/* Advance a Tx ring index by @count descriptors. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
384
/* Advance an Rx ring index by @count descriptors. The Rx rings can't
 * share typhoon_inc_index(), as their descriptors are a different size.
 */
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
	*index += count * sizeof(struct rx_desc);
	*index %= RX_ENTRIES * sizeof(struct rx_desc);
}
392
/* Soft-reset the 3XP and (optionally) wait for it to come back up.
 * @ioaddr:    mapped register base
 * @wait_type: NoWait (fire and forget), WaitNoSleep (udelay polling),
 *	or WaitSleep (may schedule between polls)
 *
 * Returns 0 once the card reports TYPHOON_STATUS_WAITING_FOR_HOST
 * (always 0 for NoWait), or -ETIMEDOUT.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and acknowledge all interrupts before resetting */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the reset line; force the RESET_ALL write to post first */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* mask and acknowledge anything the reset may have raised */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
447
448 static int
449 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
450 {
451         int i, err = 0;
452
453         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
454                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
455                         goto out;
456                 udelay(TYPHOON_UDELAY);
457         }
458
459         err = -ETIMEDOUT;
460
461 out:
462         return err;
463 }
464
465 static inline void
466 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
467 {
468         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
469                 netif_carrier_off(dev);
470         else
471                 netif_carrier_on(dev);
472 }
473
/* Answer a "hello" request from the 3XP by queueing a HELLO_RESP
 * command on the command ring.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* make the descriptor visible before ringing the doorbell */
		smp_wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
494
/* Drain the response ring, dispatching unsolicited responses in place.
 *
 * If @resp_save is non-NULL, the first sequenced response (resp->seqNo
 * set) is copied into it -- up to @resp_size descriptors, handling a
 * wrap of the response ring -- and @resp_save is then considered
 * delivered. Media-status and hello responses are handled directly;
 * anything else is dumped to the log.
 *
 * Returns nonzero once the caller's response has been captured (or if
 * none was requested), zero if it is still outstanding.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			/* response too big for the caller's buffer */
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the caller's response as delivered */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			printk(KERN_ERR "%s: dumping unexpected response "
			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
			       tp->name, le16_to_cpu(resp->cmd),
			       resp->numDesc, resp->flags,
			       le16_to_cpu(resp->parm1),
			       le32_to_cpu(resp->parm2),
			       le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish the new cleared index back to the shared area */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
553
554 static inline int
555 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
556 {
557         /* this works for all descriptors but rx_desc, as they are a
558          * different size than the cmd_desc -- everyone else is the same
559          */
560         lastWrite /= sizeof(struct cmd_desc);
561         lastRead /= sizeof(struct cmd_desc);
562         return (ringSize + lastRead - lastWrite - 1) % ringSize;
563 }
564
/* Free slots in the command ring (host writes, 3XP consumes). */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
	int lastWrite = tp->cmdRing.lastWrite;
	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
573
/* Free slots in the response ring (3XP writes, host clears). */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
	int respReady = le32_to_cpu(tp->indexes->respReady);
	int respCleared = le32_to_cpu(tp->indexes->respCleared);

	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
582
/* Free slots in the (low-priority) Tx ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
589
590 static int
591 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
592                       int num_resp, struct resp_desc *resp)
593 {
594         struct typhoon_indexes *indexes = tp->indexes;
595         struct basic_ring *ring = &tp->cmdRing;
596         struct resp_desc local_resp;
597         int i, err = 0;
598         int got_resp;
599         int freeCmd, freeResp;
600         int len, wrap_len;
601
602         spin_lock(&tp->command_lock);
603
604         freeCmd = typhoon_num_free_cmd(tp);
605         freeResp = typhoon_num_free_resp(tp);
606
607         if(freeCmd < num_cmd || freeResp < num_resp) {
608                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
609                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
610                         freeResp, num_resp);
611                 err = -ENOMEM;
612                 goto out;
613         }
614
615         if(cmd->flags & TYPHOON_CMD_RESPOND) {
616                 /* If we're expecting a response, but the caller hasn't given
617                  * us a place to put it, we'll provide one.
618                  */
619                 tp->awaiting_resp = 1;
620                 if(resp == NULL) {
621                         resp = &local_resp;
622                         num_resp = 1;
623                 }
624         }
625
626         wrap_len = 0;
627         len = num_cmd * sizeof(*cmd);
628         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
629                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
630                 len = COMMAND_RING_SIZE - ring->lastWrite;
631         }
632
633         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
634         if(unlikely(wrap_len)) {
635                 struct cmd_desc *wrap_ptr = cmd;
636                 wrap_ptr += len / sizeof(*cmd);
637                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
638         }
639
640         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641
642         /* "I feel a presence... another warrior is on the mesa."
643          */
644         wmb();
645         iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
646         typhoon_post_pci_writes(tp->ioaddr);
647
648         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
649                 goto out;
650
651         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
652          * preempt or do anything other than take interrupts. So, don't
653          * wait for a response unless you have to.
654          *
655          * I've thought about trying to sleep here, but we're called
656          * from many contexts that don't allow that. Also, given the way
657          * 3Com has implemented irq coalescing, we would likely timeout --
658          * this has been observed in real life!
659          *
660          * The big killer is we have to wait to get stats from the card,
661          * though we could go to a periodic refresh of those if we don't
662          * mind them getting somewhat stale. The rest of the waiting
663          * commands occur during open/close/suspend/resume, so they aren't
664          * time critical. Creating SAs in the future will also have to
665          * wait here.
666          */
667         got_resp = 0;
668         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
669                 if(indexes->respCleared != indexes->respReady)
670                         got_resp = typhoon_process_response(tp, num_resp,
671                                                                 resp);
672                 udelay(TYPHOON_UDELAY);
673         }
674
675         if(!got_resp) {
676                 err = -ETIMEDOUT;
677                 goto out;
678         }
679
680         /* Collect the error response even if we don't care about the
681          * rest of the response
682          */
683         if(resp->flags & TYPHOON_RESP_ERROR)
684                 err = -EIO;
685
686 out:
687         if(tp->awaiting_resp) {
688                 tp->awaiting_resp = 0;
689                 smp_wmb();
690
691                 /* Ugh. If a response was added to the ring between
692                  * the call to typhoon_process_response() and the clearing
693                  * of tp->awaiting_resp, we could have missed the interrupt
694                  * and it could hang in the ring an indeterminate amount of
695                  * time. So, check for it, and interrupt ourselves if this
696                  * is the case.
697                  */
698                 if(indexes->respCleared != indexes->respReady)
699                         iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
700         }
701
702         spin_unlock(&tp->command_lock);
703         return err;
704 }
705
706 static void
707 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
708 {
709         struct typhoon *tp = netdev_priv(dev);
710         struct cmd_desc xp_cmd;
711         int err;
712
713         spin_lock_bh(&tp->state_lock);
714         if(!tp->vlgrp != !grp) {
715                 /* We've either been turned on for the first time, or we've
716                  * been turned off. Update the 3XP.
717                  */
718                 if(grp)
719                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
720                 else
721                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
722
723                 /* If the interface is up, the runtime is running -- and we
724                  * must be up for the vlan core to call us.
725                  *
726                  * Do the command outside of the spin lock, as it is slow.
727                  */
728                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
729                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
730                 xp_cmd.parm2 = tp->offload;
731                 xp_cmd.parm3 = tp->offload;
732                 spin_unlock_bh(&tp->state_lock);
733                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
734                 if(err < 0)
735                         printk("%s: vlan offload error %d\n", tp->name, -err);
736                 spin_lock_bh(&tp->state_lock);
737         }
738
739         /* now make the change visible */
740         tp->vlgrp = grp;
741         spin_unlock_bh(&tp->state_lock);
742 }
743
/* Write a TCP-segmentation option descriptor for @skb into the Tx ring.
 * @ring_dma: bus address of the Tx ring base; used to compute the bus
 *	address of this descriptor's bytesTx field, which is handed to
 *	the NIC in respAddrLo.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	/* the whole skb is one TSO segment run: first and last */
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
764
/* typhoon_start_tx - hard_start_xmit handler: queue @skb on the low
 * priority Tx ring and kick the 3XP.
 *
 * Builds one Tx packet descriptor, an optional TSO option descriptor,
 * and one fragment descriptor per DMA mapping (head + each page frag),
 * then posts the new write index to the card. Always returns 0; the
 * queue is stopped first if the ring may not hold another worst-case
 * packet.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Stash the skb pointer in the descriptor so typhoon_clean_tx()
	 * can recover it and free the skb at completion time.
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* Ask the card to insert the VLAN tag on transmit */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: one fragment descriptor covers everything */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* Map the linear head first, then one descriptor per
		 * page fragment.
		 */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
914
915 static void
916 typhoon_set_rx_mode(struct net_device *dev)
917 {
918         struct typhoon *tp = netdev_priv(dev);
919         struct cmd_desc xp_cmd;
920         u32 mc_filter[2];
921         __le16 filter;
922
923         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
924         if(dev->flags & IFF_PROMISC) {
925                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
926         } else if((dev->mc_count > multicast_filter_limit) ||
927                   (dev->flags & IFF_ALLMULTI)) {
928                 /* Too many to match, or accept all multicasts. */
929                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
930         } else if(dev->mc_count) {
931                 struct dev_mc_list *mclist;
932                 int i;
933
934                 memset(mc_filter, 0, sizeof(mc_filter));
935                 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
936                     i++, mclist = mclist->next) {
937                         int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
938                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
939                 }
940
941                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
942                                          TYPHOON_CMD_SET_MULTICAST_HASH);
943                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
944                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
945                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
946                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
947
948                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
949         }
950
951         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
952         xp_cmd.parm1 = filter;
953         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
954 }
955
/* typhoon_do_get_stats - issue TYPHOON_CMD_READ_STATS and fold the
 * card's response into tp->stats, adding in the counters saved in
 * tp->stats_saved. Also refreshes tp->speed and tp->duplex from the
 * reported link status.
 * Returns 0 on success, or the negative error from
 * typhoon_issue_command().
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets);
	stats->tx_bytes = le64_to_cpu(s->txBytes);
	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
	/* Link speed/duplex ride along in the stats response */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	/* add in the saved statistics
	 */
	stats->tx_packets += saved->tx_packets;
	stats->tx_bytes += saved->tx_bytes;
	stats->tx_errors += saved->tx_errors;
	stats->collisions += saved->collisions;
	stats->rx_packets += saved->rx_packets;
	stats->rx_bytes += saved->rx_bytes;
	stats->rx_fifo_errors += saved->rx_fifo_errors;
	stats->rx_errors += saved->rx_errors;
	stats->rx_crc_errors += saved->rx_crc_errors;
	stats->rx_length_errors += saved->rx_length_errors;

	return 0;
}
1009
1010 static struct net_device_stats *
1011 typhoon_get_stats(struct net_device *dev)
1012 {
1013         struct typhoon *tp = netdev_priv(dev);
1014         struct net_device_stats *stats = &tp->stats;
1015         struct net_device_stats *saved = &tp->stats_saved;
1016
1017         smp_rmb();
1018         if(tp->card_state == Sleeping)
1019                 return saved;
1020
1021         if(typhoon_do_get_stats(tp) < 0) {
1022                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1023                 return saved;
1024         }
1025
1026         return stats;
1027 }
1028
1029 static int
1030 typhoon_set_mac_address(struct net_device *dev, void *addr)
1031 {
1032         struct sockaddr *saddr = (struct sockaddr *) addr;
1033
1034         if(netif_running(dev))
1035                 return -EBUSY;
1036
1037         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1038         return 0;
1039 }
1040
1041 static void
1042 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1043 {
1044         struct typhoon *tp = netdev_priv(dev);
1045         struct pci_dev *pci_dev = tp->pdev;
1046         struct cmd_desc xp_cmd;
1047         struct resp_desc xp_resp[3];
1048
1049         smp_rmb();
1050         if(tp->card_state == Sleeping) {
1051                 strcpy(info->fw_version, "Sleep image");
1052         } else {
1053                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1054                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1055                         strcpy(info->fw_version, "Unknown runtime");
1056                 } else {
1057                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1058                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1059                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1060                                  sleep_ver & 0xfff);
1061                 }
1062         }
1063
1064         strcpy(info->driver, DRV_MODULE_NAME);
1065         strcpy(info->version, DRV_MODULE_VERSION);
1066         strcpy(info->bus_info, pci_name(pci_dev));
1067 }
1068
/* typhoon_get_settings - ethtool get_settings hook. Reports supported
 * and advertised link modes based on tp->xcvr_select and whether the
 * card is fiber or copper, plus the current speed/duplex/autoneg state.
 * Always returns 0.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	/* Map the configured transceiver selection to advertised modes */
	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					    ADVERTISED_10baseT_Full |
					    ADVERTISED_100baseT_Half |
					    ADVERTISED_100baseT_Full |
					    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1126
1127 static int
1128 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1129 {
1130         struct typhoon *tp = netdev_priv(dev);
1131         struct cmd_desc xp_cmd;
1132         __le16 xcvr;
1133         int err;
1134
1135         err = -EINVAL;
1136         if(cmd->autoneg == AUTONEG_ENABLE) {
1137                 xcvr = TYPHOON_XCVR_AUTONEG;
1138         } else {
1139                 if(cmd->duplex == DUPLEX_HALF) {
1140                         if(cmd->speed == SPEED_10)
1141                                 xcvr = TYPHOON_XCVR_10HALF;
1142                         else if(cmd->speed == SPEED_100)
1143                                 xcvr = TYPHOON_XCVR_100HALF;
1144                         else
1145                                 goto out;
1146                 } else if(cmd->duplex == DUPLEX_FULL) {
1147                         if(cmd->speed == SPEED_10)
1148                                 xcvr = TYPHOON_XCVR_10FULL;
1149                         else if(cmd->speed == SPEED_100)
1150                                 xcvr = TYPHOON_XCVR_100FULL;
1151                         else
1152                                 goto out;
1153                 } else
1154                         goto out;
1155         }
1156
1157         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1158         xp_cmd.parm1 = xcvr;
1159         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1160         if(err < 0)
1161                 goto out;
1162
1163         tp->xcvr_select = xcvr;
1164         if(cmd->autoneg == AUTONEG_ENABLE) {
1165                 tp->speed = 0xff;       /* invalid */
1166                 tp->duplex = 0xff;      /* invalid */
1167         } else {
1168                 tp->speed = cmd->speed;
1169                 tp->duplex = cmd->duplex;
1170         }
1171
1172 out:
1173         return err;
1174 }
1175
1176 static void
1177 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1178 {
1179         struct typhoon *tp = netdev_priv(dev);
1180
1181         wol->supported = WAKE_PHY | WAKE_MAGIC;
1182         wol->wolopts = 0;
1183         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1184                 wol->wolopts |= WAKE_PHY;
1185         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1186                 wol->wolopts |= WAKE_MAGIC;
1187         memset(&wol->sopass, 0, sizeof(wol->sopass));
1188 }
1189
1190 static int
1191 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1192 {
1193         struct typhoon *tp = netdev_priv(dev);
1194
1195         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1196                 return -EINVAL;
1197
1198         tp->wol_events = 0;
1199         if(wol->wolopts & WAKE_PHY)
1200                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1201         if(wol->wolopts & WAKE_MAGIC)
1202                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1203
1204         return 0;
1205 }
1206
1207 static u32
1208 typhoon_get_rx_csum(struct net_device *dev)
1209 {
1210         /* For now, we don't allow turning off RX checksums.
1211          */
1212         return 1;
1213 }
1214
1215 static void
1216 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1217 {
1218         ering->rx_max_pending = RXENT_ENTRIES;
1219         ering->rx_mini_max_pending = 0;
1220         ering->rx_jumbo_max_pending = 0;
1221         ering->tx_max_pending = TXLO_ENTRIES - 1;
1222
1223         ering->rx_pending = RXENT_ENTRIES;
1224         ering->rx_mini_pending = 0;
1225         ering->rx_jumbo_pending = 0;
1226         ering->tx_pending = TXLO_ENTRIES - 1;
1227 }
1228
/* ethtool operations implemented by this driver; link state comes from
 * the generic ethtool_op_get_link helper.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1242
1243 static int
1244 typhoon_wait_interrupt(void __iomem *ioaddr)
1245 {
1246         int i, err = 0;
1247
1248         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1249                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1250                    TYPHOON_INTR_BOOTCMD)
1251                         goto out;
1252                 udelay(TYPHOON_UDELAY);
1253         }
1254
1255         err = -ETIMEDOUT;
1256
1257 out:
1258         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1259         return err;
1260 }
1261
/* Byte offset of member @x within struct typhoon_shared; added to
 * tp->shared_dma to form the bus address of each shared ring.
 */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)
1263
/* typhoon_init_interface - zero the shared host/card area and fill in
 * the typhoon_interface block with the bus address and size of every
 * ring, then set up the host-side ring bookkeeping, default offload
 * flags, and locks. The card starts out in the Sleeping state.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side (virtual address) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* txLoRing bus address is needed by the TSO path in
	 * typhoon_start_tx(); publish the Sleeping state before use.
	 */
	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1331
1332 static void
1333 typhoon_init_rings(struct typhoon *tp)
1334 {
1335         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1336
1337         tp->txLoRing.lastWrite = 0;
1338         tp->txHiRing.lastWrite = 0;
1339         tp->rxLoRing.lastWrite = 0;
1340         tp->rxHiRing.lastWrite = 0;
1341         tp->rxBuffRing.lastWrite = 0;
1342         tp->cmdRing.lastWrite = 0;
1343         tp->cmdRing.lastWrite = 0;
1344
1345         tp->txLoRing.lastRead = 0;
1346         tp->txHiRing.lastRead = 0;
1347 }
1348
/* Firmware image from request_firmware(); loaded once and shared by
 * all typhoon devices.
 */
static const struct firmware *typhoon_fw;
/* kmalloc()ed copy of the firmware image -- the firmware core's buffer
 * may be vmalloc memory, which cannot be DMA-mapped for the download.
 */
static u8 *typhoon_fw_image;
1351
1352 static int
1353 typhoon_request_firmware(struct typhoon *tp)
1354 {
1355         int err;
1356
1357         if (typhoon_fw)
1358                 return 0;
1359
1360         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1361         if (err) {
1362                 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
1363                        tp->name, FIRMWARE_NAME);
1364                 return err;
1365         }
1366
1367         if (typhoon_fw->size < sizeof(struct typhoon_file_header) ||
1368             memcmp(typhoon_fw->data, "TYPHOON", 8)) {
1369                 printk(KERN_ERR "%s: Invalid firmware image\n",
1370                        tp->name);
1371                 err = -EINVAL;
1372                 goto out_err;
1373         }
1374
1375         typhoon_fw_image = kmalloc(typhoon_fw->size, GFP_KERNEL);
1376         if (!typhoon_fw_image) {
1377                 err = -ENOMEM;
1378                 goto out_err;
1379         }
1380
1381         return 0;
1382
1383 out_err:
1384         release_firmware(typhoon_fw);
1385         typhoon_fw = NULL;
1386         return err;
1387 }
1388
/* typhoon_download_firmware - push the runtime firmware image to the
 * 3XP, section by section, following the card's boot handshake:
 * wait for WAITING_FOR_HOST, write the HMAC digest and start address,
 * then for each section wait for the boot command interrupt, program
 * length/checksum/addresses, and signal SEG_AVAILABLE. The whole image
 * is DMA-mapped once and sections are addressed by offset into it.
 * Returns 0 on success or a negative error.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	dma_addr_t image_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw_image;
	fHdr = (struct typhoon_file_header *) image_data;

	err = -ENOMEM;
	/* map the kmalloc()ed firmware copy for device reads */
	image_dma = pci_map_single(pdev, (u8 *) image_data,
				   typhoon_fw->size, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, image_dma)) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* temporarily enable and unmask the boot command interrupt so we
	 * can poll for it; restored on exit
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the image's entry point and HMAC digest, then
	 * start the runtime-image download
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		if (typhoon_wait_interrupt(ioaddr) < 0 ||
		    ioread32(ioaddr + TYPHOON_REG_STATUS) !=
		    TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
			printk(KERN_ERR "%s: segment ready timeout\n",
			       tp->name);
			goto err_out_irq;
		}

		/* Do an pseudo IPv4 checksum on the data -- first
		 * need to convert each u16 to cpu order before
		 * summing. Fortunately, due to the properties of
		 * the checksum, we can do this once, at the end.
		 */
		csum = csum_fold(csum_partial(image_data, section_len, 0));

		/* describe the section to the card by offset into the
		 * already-mapped image, then signal it is available
		 */
		iowrite32(section_len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
		iowrite32(le16_to_cpu((__force __le16)csum),
			  ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
		iowrite32(load_addr,
			  ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
		iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
		iowrite32(image_dma + (image_data - typhoon_fw_image),
			  ioaddr + TYPHOON_REG_BOOT_DATA_LO);
		typhoon_post_pci_writes(ioaddr);
		iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			  ioaddr + TYPHOON_REG_COMMAND);

		image_data += section_len;
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the original interrupt enable/mask state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_unmap_single(pdev, image_dma,  typhoon_fw->size, PCI_DMA_TODEVICE);

err_out:
	return err;
}
1518
/* typhoon_boot_3XP - boot the card once firmware is downloaded: wait
 * for @initial_status, hand the card the bus address of the shared
 * boot record, wait for it to report RUNNING, clear the ring ready
 * registers, and issue the final boot command.
 * Returns 0 on success, -ETIMEDOUT if the card fails a handshake step.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* point the card at the shared area (boot record) */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1554
/* Reclaim completed Tx descriptors.
 *
 * Walks the Tx ring from lastRead up to *index (the card's cleared
 * position), freeing skbs for packet descriptors and unmapping DMA
 * for fragment descriptors. Returns the new lastRead offset; the
 * caller is responsible for storing it back into the ring.
 *
 * NOTE(review): tx_addr of a TYPHOON_TX_DESC is recovered as a raw
 * sk_buff pointer stuffed into the descriptor. Storing a kernel
 * pointer in a 32-bit descriptor field looks unsafe on 64-bit --
 * verify against the Tx fill path before relying on this.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the descriptor slot free for the Tx path. */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1590
1591 static void
1592 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1593                         volatile __le32 * index)
1594 {
1595         u32 lastRead;
1596         int numDesc = MAX_SKB_FRAGS + 1;
1597
1598         /* This will need changing if we start to use the Hi Tx ring. */
1599         lastRead = typhoon_clean_tx(tp, txRing, index);
1600         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1601                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1602                 netif_wake_queue(tp->dev);
1603
1604         txRing->lastRead = lastRead;
1605         smp_wmb();
1606 }
1607
/* Hand an rx buffer (still DMA-mapped) back to the card's free ring
 * for reuse. If the free ring has no room, the skb is freed and the
 * slot left empty -- typhoon_fill_free_ring() will replenish it later.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	/* virtAddr round-trips through the card unchanged, so no
	 * endian conversion is applied (see typhoon_rx).
	 */
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1634
1635 static int
1636 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1637 {
1638         struct typhoon_indexes *indexes = tp->indexes;
1639         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1640         struct basic_ring *ring = &tp->rxBuffRing;
1641         struct rx_free *r;
1642         struct sk_buff *skb;
1643         dma_addr_t dma_addr;
1644
1645         rxb->skb = NULL;
1646
1647         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1648                                 le32_to_cpu(indexes->rxBuffCleared))
1649                 return -ENOMEM;
1650
1651         skb = dev_alloc_skb(PKT_BUF_SZ);
1652         if(!skb)
1653                 return -ENOMEM;
1654
1655 #if 0
1656         /* Please, 3com, fix the firmware to allow DMA to a unaligned
1657          * address! Pretty please?
1658          */
1659         skb_reserve(skb, 2);
1660 #endif
1661
1662         skb->dev = tp->dev;
1663         dma_addr = pci_map_single(tp->pdev, skb->data,
1664                                   PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1665
1666         /* Since no card does 64 bit DAC, the high bits will never
1667          * change from zero.
1668          */
1669         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1670         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1671         r->virtAddr = idx;
1672         r->physAddr = cpu_to_le32(dma_addr);
1673         rxb->skb = skb;
1674         rxb->dma_addr = dma_addr;
1675
1676         /* Tell the card about it */
1677         wmb();
1678         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1679         return 0;
1680 }
1681
/* Process up to @budget received frames from @rxRing.
 *
 * Small packets (< rx_copybreak) are copied into a freshly allocated
 * skb so the original, still-mapped buffer can be recycled back to
 * the card cheaply; larger packets are unmapped and passed up
 * directly, with a replacement buffer allocated for the ring.
 * Firmware checksum-good status is translated into
 * CHECKSUM_UNNECESSARY. Returns the number of packets handed to the
 * stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* idx is the slot we posted in typhoon_alloc_rx_skb();
		 * the card echoes it back unconverted.
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* Bad frame: put the buffer straight back. */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak path: reserve 2 bytes so the IP
			 * header lands aligned in the copy.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Pass the original buffer up; refill the slot. */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	/* Publish how far we have consumed so the card can reuse slots. */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1761
1762 static void
1763 typhoon_fill_free_ring(struct typhoon *tp)
1764 {
1765         u32 i;
1766
1767         for(i = 0; i < RXENT_ENTRIES; i++) {
1768                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1769                 if(rxb->skb)
1770                         continue;
1771                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1772                         break;
1773         }
1774 }
1775
/* NAPI poll callback.
 *
 * Drains pending command responses and Tx completions, processes both
 * Rx rings within @budget, refills the free buffer ring if the card
 * has drained it, and -- when under budget -- completes NAPI and
 * re-enables interrupts by clearing the interrupt mask.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Make sure we see the card's latest writes to the shared
	 * indexes block before reading any of them.
	 */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* Unmask interrupts now that polling is done. */
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1816
/* Interrupt handler (shared line).
 *
 * Acks the interrupt by writing the status back, masks all card
 * interrupts, and schedules NAPI; typhoon_poll() unmasks them when
 * it finishes. Returns IRQ_NONE if the card did not raise the line.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Writing the status bits back acknowledges them. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
		       dev->name);
	}
	return IRQ_HANDLED;
}
1841
1842 static void
1843 typhoon_free_rx_rings(struct typhoon *tp)
1844 {
1845         u32 i;
1846
1847         for(i = 0; i < RXENT_ENTRIES; i++) {
1848                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1849                 if(rxb->skb) {
1850                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1851                                        PCI_DMA_FROMDEVICE);
1852                         dev_kfree_skb(rxb->skb);
1853                         rxb->skb = NULL;
1854                 }
1855         }
1856 }
1857
/* Put the 3XP to sleep, arming the wake-on-LAN @events first.
 *
 * Tells the sleep image which events may wake it, issues the sleep
 * command, waits for TYPHOON_STATUS_SLEEPING, then drops the PCI
 * device into the requested power @state. Returns 0 or a negative
 * errno.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1895
/* Wake the card from sleep and restore its PCI state.
 *
 * Issues the wakeup boot command and waits for the card to ask for a
 * host image. Resets instead if that times out, or if the firmware is
 * known to require a reset after wakeup. Returns 0, or the result of
 * typhoon_reset().
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1916
/* Download and boot the runtime image, then configure and enable it.
 *
 * After booting, issues the setup command sequence in order: max
 * packet size, MAC address, IRQ coalescing off, transceiver select,
 * VLAN ethertype, offload tasks, rx mode, Tx enable, Rx enable.
 * On any failure the card is reset and the rings reinitialized.
 * Returns 0 or a negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		printk("%s: cannot load runtime on 3XP\n", tp->name);
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk("%s: cannot boot 3XP\n", tp->name);
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC address goes to the card as two big-endian chunks. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* tp->offload is shared with the VLAN/offload config paths, so
	 * read it under state_lock.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish the state change before enabling interrupts. */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2010
/* Stop the running image in preparation for close, suspend, or sleep.
 *
 * Disables interrupts and Rx, waits up to 1/2 sec for in-flight Tx to
 * drain, saves the statistics so they survive the restart, halts and
 * resets the 3XP, then reclaims anything still on the Tx ring.
 * Returns 0, or -ETIMEDOUT if the final reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2074
2075 static void
2076 typhoon_tx_timeout(struct net_device *dev)
2077 {
2078         struct typhoon *tp = netdev_priv(dev);
2079
2080         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2081                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2082                                         dev->name);
2083                 goto truely_dead;
2084         }
2085
2086         /* If we ever start using the Hi ring, it will need cleaning too */
2087         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2088         typhoon_free_rx_rings(tp);
2089
2090         if(typhoon_start_runtime(tp) < 0) {
2091                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2092                                         dev->name);
2093                 goto truely_dead;
2094         }
2095
2096         netif_wake_queue(dev);
2097         return;
2098
2099 truely_dead:
2100         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2101         typhoon_reset(tp->ioaddr, NoWait);
2102         netif_carrier_off(dev);
2103 }
2104
/* net_device_ops .ndo_open: bring the interface up.
 *
 * Loads the firmware, wakes the card, requests the IRQ, and starts
 * the runtime image. On failure, tries to reboot the card into its
 * sleep image and put it back into D3hot rather than leave it
 * half-awake. Returns 0 or a negative errno.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Best-effort: get the card back into its low-power sleep
	 * image before reporting the failure.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2154
/* net_device_ops .ndo_stop: take the interface down.
 *
 * Stops the queue and NAPI, halts the runtime image, releases the
 * IRQ and rx buffers, then reboots the card into its sleep image and
 * puts it in D3hot. Errors along the way are logged but close still
 * returns 0.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2180
2181 #ifdef CONFIG_PM
/* PCI resume handler.
 *
 * If the interface was up at suspend time, wakes the card and
 * restarts the runtime image. On failure, resets the hardware and
 * returns -EBUSY.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2212
/* PCI suspend handler.
 *
 * Refuses to suspend if WAKE_MAGIC is requested while VLANs are
 * registered (the firmware cannot do both -- see the check below).
 * Otherwise stops the runtime, boots the sleep image, programs the
 * MAC address and a directed+broadcast rx filter for wakeup
 * matching, and puts the card to sleep with the configured WOL
 * events. Any failure rolls back via typhoon_resume().
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	/* MAC address goes to the card as two big-endian chunks. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2277 #endif
2278
/* Probe whether MMIO access to BAR 1 works on this system.
 *
 * Raises a self-interrupt through the MMIO mapping and checks that
 * the interrupt status register reflects it. Returns 1 if MMIO
 * works, 0 to fall back to port IO.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore the quiescent interrupt state we found the card in. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		printk(KERN_INFO PFX "falling back to port IO\n");
	return mode;
}
2324
/* Standard net_device callbacks implemented by this driver. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2337
2338 static int __devinit
2339 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2340 {
2341         static int did_version = 0;
2342         struct net_device *dev;
2343         struct typhoon *tp;
2344         int card_id = (int) ent->driver_data;
2345         void __iomem *ioaddr;
2346         void *shared;
2347         dma_addr_t shared_dma;
2348         struct cmd_desc xp_cmd;
2349         struct resp_desc xp_resp[3];
2350         int err = 0;
2351
2352         if(!did_version++)
2353                 printk(KERN_INFO "%s", version);
2354
2355         dev = alloc_etherdev(sizeof(*tp));
2356         if(dev == NULL) {
2357                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2358                        pci_name(pdev));
2359                 err = -ENOMEM;
2360                 goto error_out;
2361         }
2362         SET_NETDEV_DEV(dev, &pdev->dev);
2363
2364         err = pci_enable_device(pdev);
2365         if(err < 0) {
2366                 printk(ERR_PFX "%s: unable to enable device\n",
2367                        pci_name(pdev));
2368                 goto error_out_dev;
2369         }
2370
2371         err = pci_set_mwi(pdev);
2372         if(err < 0) {
2373                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2374                 goto error_out_disable;
2375         }
2376
2377         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2378         if(err < 0) {
2379                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2380                        pci_name(pdev));
2381                 goto error_out_mwi;
2382         }
2383
2384         /* sanity checks on IO and MMIO BARs
2385          */
2386         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2387                 printk(ERR_PFX
2388                        "%s: region #1 not a PCI IO resource, aborting\n",
2389                        pci_name(pdev));
2390                 err = -ENODEV;
2391                 goto error_out_mwi;
2392         }
2393         if(pci_resource_len(pdev, 0) < 128) {
2394                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2395                        pci_name(pdev));
2396                 err = -ENODEV;
2397                 goto error_out_mwi;
2398         }
2399         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2400                 printk(ERR_PFX
2401                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2402                        pci_name(pdev));
2403                 err = -ENODEV;
2404                 goto error_out_mwi;
2405         }
2406         if(pci_resource_len(pdev, 1) < 128) {
2407                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2408                        pci_name(pdev));
2409                 err = -ENODEV;
2410                 goto error_out_mwi;
2411         }
2412
2413         err = pci_request_regions(pdev, "typhoon");
2414         if(err < 0) {
2415                 printk(ERR_PFX "%s: could not request regions\n",
2416                        pci_name(pdev));
2417                 goto error_out_mwi;
2418         }
2419
2420         /* map our registers
2421          */
2422         if(use_mmio != 0 && use_mmio != 1)
2423                 use_mmio = typhoon_test_mmio(pdev);
2424
2425         ioaddr = pci_iomap(pdev, use_mmio, 128);
2426         if (!ioaddr) {
2427                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2428                        pci_name(pdev));
2429                 err = -EIO;
2430                 goto error_out_regions;
2431         }
2432
2433         /* allocate pci dma space for rx and tx descriptor rings
2434          */
2435         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2436                                       &shared_dma);
2437         if(!shared) {
2438                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2439                        pci_name(pdev));
2440                 err = -ENOMEM;
2441                 goto error_out_remap;
2442         }
2443
2444         dev->irq = pdev->irq;
2445         tp = netdev_priv(dev);
2446         tp->shared = (struct typhoon_shared *) shared;
2447         tp->shared_dma = shared_dma;
2448         tp->pdev = pdev;
2449         tp->tx_pdev = pdev;
2450         tp->ioaddr = ioaddr;
2451         tp->tx_ioaddr = ioaddr;
2452         tp->dev = dev;
2453
2454         /* Init sequence:
2455          * 1) Reset the adapter to clear any bad juju
2456          * 2) Reload the sleep image
2457          * 3) Boot the sleep image
2458          * 4) Get the hardware address.
2459          * 5) Put the card to sleep.
2460          */
2461         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2462                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2463                 err = -EIO;
2464                 goto error_out_dma;
2465         }
2466
2467         /* Now that we've reset the 3XP and are sure it's not going to
2468          * write all over memory, enable bus mastering, and save our
2469          * state for resuming after a suspend.
2470          */
2471         pci_set_master(pdev);
2472         pci_save_state(pdev);
2473
2474         /* dev->name is not valid until we register, but we need to
2475          * use some common routines to initialize the card. So that those
2476          * routines print the right name, we keep our oun pointer to the name
2477          */
2478         tp->name = pci_name(pdev);
2479
2480         typhoon_init_interface(tp);
2481         typhoon_init_rings(tp);
2482
2483         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2484                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2485                        pci_name(pdev));
2486                 err = -EIO;
2487                 goto error_out_reset;
2488         }
2489
2490         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2491         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2492                 printk(ERR_PFX "%s: cannot read MAC address\n",
2493                        pci_name(pdev));
2494                 err = -EIO;
2495                 goto error_out_reset;
2496         }
2497
2498         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2499         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2500
2501         if(!is_valid_ether_addr(dev->dev_addr)) {
2502                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2503                        "aborting\n", pci_name(pdev));
2504                 goto error_out_reset;
2505         }
2506
2507         /* Read the Sleep Image version last, so the response is valid
2508          * later when we print out the version reported.
2509          */
2510         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2511         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2512                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2513                         pci_name(pdev));
2514                 goto error_out_reset;
2515         }
2516
2517         tp->capabilities = typhoon_card_info[card_id].capabilities;
2518         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2519
2520         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2521          * READ_VERSIONS command. Those versions are OK after waking up
2522          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2523          * seem to need a little extra help to get started. Since we don't
2524          * know how to nudge it along, just kick it.
2525          */
2526         if(xp_resp[0].numDesc != 0)
2527                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2528
2529         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2530                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2531                        pci_name(pdev));
2532                 err = -EIO;
2533                 goto error_out_reset;
2534         }
2535
2536         /* The chip-specific entries in the device structure. */
2537         dev->netdev_ops         = &typhoon_netdev_ops;
2538         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2539         dev->watchdog_timeo     = TX_TIMEOUT;
2540
2541         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2542
2543         /* We can handle scatter gather, up to 16 entries, and
2544          * we can do IP checksumming (only version 4, doh...)
2545          */
2546         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2547         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2548         dev->features |= NETIF_F_TSO;
2549
2550         if(register_netdev(dev) < 0)
2551                 goto error_out_reset;
2552
2553         /* fixup our local name */
2554         tp->name = dev->name;
2555
2556         pci_set_drvdata(pdev, dev);
2557
2558         printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2559                dev->name, typhoon_card_info[card_id].name,
2560                use_mmio ? "MMIO" : "IO",
2561                (unsigned long long)pci_resource_start(pdev, use_mmio),
2562                dev->dev_addr);
2563
2564         /* xp_resp still contains the response to the READ_VERSIONS command.
2565          * For debugging, let the user know what version he has.
2566          */
2567         if(xp_resp[0].numDesc == 0) {
2568                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2569                  * of version is Month/Day of build.
2570                  */
2571                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2572                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2573                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2574                         monthday & 0xff);
2575         } else if(xp_resp[0].numDesc == 2) {
2576                 /* This is the Typhoon 1.1+ type Sleep Image
2577                  */
2578                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2579                 u8 *ver_string = (u8 *) &xp_resp[1];
2580                 ver_string[25] = 0;
2581                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2582                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2583                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2584                         ver_string);
2585         } else {
2586                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2587                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2588                         le32_to_cpu(xp_resp[0].parm2));
2589         }
2590
2591         return 0;
2592
2593 error_out_reset:
2594         typhoon_reset(ioaddr, NoWait);
2595
2596 error_out_dma:
2597         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2598                             shared, shared_dma);
2599 error_out_remap:
2600         pci_iounmap(pdev, ioaddr);
2601 error_out_regions:
2602         pci_release_regions(pdev);
2603 error_out_mwi:
2604         pci_clear_mwi(pdev);
2605 error_out_disable:
2606         pci_disable_device(pdev);
2607 error_out_dev:
2608         free_netdev(dev);
2609 error_out:
2610         return err;
2611 }
2612
/* PCI remove routine: tear down one adapter.
 *
 * Unwinds typhoon_init_one() in reverse order: unregister the netdev
 * first so no new traffic can arrive, wake the card and restore its
 * PCI config state (it was put into D3hot at probe/close time), reset
 * the 3XP so it stops touching host memory, then release the mapped
 * registers, DMA area, regions, MWI, and the device itself.
 *
 * NOTE(review): the ordering here is load-bearing -- the reset must
 * precede freeing the shared DMA area, and unregister_netdev() must
 * come first. Do not reorder.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2632
/* PCI driver descriptor binding the typhoon_pci_tbl device IDs to the
 * probe/remove (and, with CONFIG_PM, suspend/resume) entry points.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2643
2644 static int __init
2645 typhoon_init(void)
2646 {
2647         return pci_register_driver(&typhoon_driver);
2648 }
2649
/* Module unload: drop the cached firmware, then unregister the driver.
 *
 * typhoon_fw is the firmware handle from request_firmware();
 * typhoon_fw_image is a separately kfree()-able copy of its data
 * (presumably made because the firmware core's vmalloc'd buffer
 * cannot be DMA'd to the card -- see the changelog; confirm against
 * the load path).
 */
static void __exit
typhoon_cleanup(void)
{
	if (typhoon_fw) {
		kfree(typhoon_fw_image);
		release_firmware(typhoon_fw);
	}
	pci_unregister_driver(&typhoon_driver);
}
2659
/* Wire the module load/unload hooks into the kernel module machinery. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);