net: 3Com Typhoon (3CR990 family) Ethernet driver
[safe/jmp/linux-2.6] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35         SAs, but an ugly wart nevertheless.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * Exposed as a module parameter below.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * Exposed as a module parameter below.
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES            2
80 #define TXLO_ENTRIES            128
81 #define RX_ENTRIES              32
82 #define COMMAND_ENTRIES         16
83 #define RESPONSE_ENTRIES        32
84
85 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES          128
93 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99
100 #define PKT_BUF_SZ              1536
101
102 #define DRV_MODULE_NAME         "typhoon"
103 #define DRV_MODULE_VERSION      "1.5.9"
104 #define DRV_MODULE_RELDATE      "Mar 2, 2009"
105 #define PFX                     DRV_MODULE_NAME ": "
106 #define ERR_PFX                 KERN_ERR PFX
107 #define FIRMWARE_NAME           "3com/typhoon.bin"
108
109 #include <linux/module.h>
110 #include <linux/kernel.h>
111 #include <linux/sched.h>
112 #include <linux/string.h>
113 #include <linux/timer.h>
114 #include <linux/errno.h>
115 #include <linux/ioport.h>
116 #include <linux/slab.h>
117 #include <linux/interrupt.h>
118 #include <linux/pci.h>
119 #include <linux/netdevice.h>
120 #include <linux/etherdevice.h>
121 #include <linux/skbuff.h>
122 #include <linux/mm.h>
123 #include <linux/init.h>
124 #include <linux/delay.h>
125 #include <linux/ethtool.h>
126 #include <linux/if_vlan.h>
127 #include <linux/crc32.h>
128 #include <linux/bitops.h>
129 #include <asm/processor.h>
130 #include <asm/io.h>
131 #include <asm/uaccess.h>
132 #include <linux/in6.h>
133 #include <linux/dma-mapping.h>
134 #include <linux/firmware.h>
135
136 #include "typhoon.h"
137
/* Module identification banner and parameter descriptions. */
static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
                               "the buffer given back to the NIC. Default "
                               "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
                           "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
153
154 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
155 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
156 #undef NETIF_F_TSO
157 #endif
158
159 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
160 #error TX ring too small!
161 #endif
162
/* Per-board description: marketing name plus TYPHOON_* capability flags. */
struct typhoon_card_info {
        char *name;             /* human-readable board name */
        int capabilities;       /* bitwise OR of the TYPHOON_* flags below */
};

/* Capability flag bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE             0x00
#define TYPHOON_CRYPTO_DES              0x01
#define TYPHOON_CRYPTO_3DES             0x02
#define TYPHOON_CRYPTO_VARIABLE         0x04
#define TYPHOON_FIBER                   0x08
#define TYPHOON_WAKEUP_NEEDS_RESET      0x10

/* Board types; stored as driver_data in typhoon_pci_tbl and used
 * to index typhoon_card_info[] directly.
 */
enum typhoon_cards {
        TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
        TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
        TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
        TYPHOON_FXM,
};
181
/* directly indexed by enum typhoon_cards, above --
 * entry order here must match the enum's order exactly.
 */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
        { "3Com Typhoon (3C990-TX)",
                TYPHOON_CRYPTO_NONE},
        { "3Com Typhoon (3CR990-TX-95)",
                TYPHOON_CRYPTO_DES},
        { "3Com Typhoon (3CR990-TX-97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
        { "3Com Typhoon (3C990SVR)",
                TYPHOON_CRYPTO_NONE},
        { "3Com Typhoon (3CR990SVR95)",
                TYPHOON_CRYPTO_DES},
        { "3Com Typhoon (3CR990SVR97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
        { "3Com Typhoon2 (3C990B-TX-M)",
                TYPHOON_CRYPTO_VARIABLE},
        { "3Com Typhoon2 (3C990BSVR)",
                TYPHOON_CRYPTO_VARIABLE},
        { "3Com Typhoon (3CR990-FX-95)",
                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-95 Server)",
                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-97 Server)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
        { "3Com Typhoon2 (3C990B-FX-97)",
                TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
211
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 *
 * The final field of each entry (driver_data) is an enum typhoon_cards
 * value identifying the matched board.
 */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
          PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
          PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
          PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
248
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 *
 * NOTE(review): this layout appears to be shared with the NIC over DMA
 * (see tp->shared / tp->shared_dma), hence the packed attribute and the
 * explicit cacheline alignment of each ring -- do not reorder fields.
 */
#define __3xp_aligned   ____cacheline_aligned
struct typhoon_shared {
        struct typhoon_interface        iface;
        struct typhoon_indexes          indexes                 __3xp_aligned;
        struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
        struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
        struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
        struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
        struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
        struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
        u32                             zeroWord;
        struct tx_desc                  txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
266
/* Host-side bookkeeping for one posted receive buffer: the skb backing
 * the buffer and the DMA address it was mapped at.
 */
struct rxbuff_ent {
        struct sk_buff *skb;
        dma_addr_t      dma_addr;
};
271
/* Per-adapter driver state. Fields are grouped by the path that touches
 * them (Tx, Irq/Rx, general), with each group starting on its own cache
 * line to limit false sharing between the hot paths.
 */
struct typhoon {
        /* Tx cache line section */
        struct transmit_ring    txLoRing        ____cacheline_aligned;
        struct pci_dev *        tx_pdev;
        void __iomem            *tx_ioaddr;
        u32                     txlo_dma_addr;

        /* Irq/Rx cache line section */
        void __iomem            *ioaddr         ____cacheline_aligned;
        struct typhoon_indexes *indexes;
        u8                      awaiting_resp;  /* set while a command waits for its response */
        u8                      duplex;
        u8                      speed;
        u8                      card_state;     /* enum state_values: Sleeping or Running */
        struct basic_ring       rxLoRing;
        struct pci_dev *        pdev;
        struct net_device *     dev;
        struct napi_struct      napi;
        spinlock_t              state_lock;
        struct vlan_group *     vlgrp;
        struct basic_ring       rxHiRing;
        struct basic_ring       rxBuffRing;
        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];

        /* general section */
        spinlock_t              command_lock    ____cacheline_aligned;
        struct basic_ring       cmdRing;
        struct basic_ring       respRing;
        struct net_device_stats stats;
        struct net_device_stats stats_saved;
        const char *            name;
        struct typhoon_shared * shared;         /* host/NIC shared area */
        dma_addr_t              shared_dma;     /* bus address of *shared */
        __le16                  xcvr_select;
        __le16                  wol_events;
        __le32                  offload;        /* TYPHOON_OFFLOAD_* bits currently enabled */

        /* unused stuff (future use) */
        int                     capabilities;
        struct transmit_ring    txHiRing;
};
313
/* How callers of typhoon_reset() may wait for completion: not at all,
 * busy-wait (udelay), or polling that is allowed to schedule.
 */
enum completion_wait_values {
        NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
        Sleeping = 0, Running,
};

/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
        do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY                  50
#define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)

/* Provide no-op fallbacks so the TSO code compiles away cleanly when
 * NETIF_F_TSO is unavailable (or was disabled above).
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS     2
#define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO             0
#define skb_tso_size(x)         0
#define TSO_NUM_DESCRIPTORS     0
#define TSO_OFFLOAD_ON          0
#endif
349
350 static inline void
351 typhoon_inc_index(u32 *index, const int count, const int num_entries)
352 {
353         /* Increment a ring index -- we can use this for all rings execept
354          * the Rx rings, as they use different size descriptors
355          * otherwise, everything is the same size as a cmd_desc
356          */
357         *index += count * sizeof(struct cmd_desc);
358         *index %= num_entries * sizeof(struct cmd_desc);
359 }
360
/* Advance an index in the command ring */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
        typhoon_inc_index(index, count, COMMAND_ENTRIES);
}

/* Advance an index in the response ring */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
        typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}

/* Advance an index in the free-buffer ring */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
        typhoon_inc_index(index, count, RXFREE_ENTRIES);
}

/* Advance an index in the (low) Tx ring */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
        /* if we start using the Hi Tx ring, this needs updating */
        typhoon_inc_index(index, count, TXLO_ENTRIES);
}

/* Advance an index in an Rx ring -- these have their own descriptor size */
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
        /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
        *index += count * sizeof(struct rx_desc);
        *index %= RX_ENTRIES * sizeof(struct rx_desc);
}
393
/* Soft-reset the 3XP.
 * @ioaddr: mapped register base
 * @wait_type: NoWait -- issue the reset and return 0 without polling;
 *      WaitNoSleep -- busy-wait (udelay) for the card to report
 *      TYPHOON_STATUS_WAITING_FOR_HOST; WaitSleep -- poll while allowing
 *      the scheduler to run, and use the longer jiffies-based timeout.
 * Returns 0 on success, -ETIMEDOUT if the expected status never appears.
 *
 * All interrupts are masked and acked both before and after the reset.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
        int i, err = 0;
        int timeout;

        if(wait_type == WaitNoSleep)
                timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
        else
                timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

        /* mask and ack everything so the reset doesn't trigger spurious irqs */
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        /* pulse the reset: assert, make sure it posted, then deassert */
        iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
        typhoon_post_pci_writes(ioaddr);
        udelay(1);
        iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

        if(wait_type != NoWait) {
                for(i = 0; i < timeout; i++) {
                        if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
                           TYPHOON_STATUS_WAITING_FOR_HOST)
                                goto out;

                        if(wait_type == WaitSleep)
                                schedule_timeout_uninterruptible(1);
                        else
                                udelay(TYPHOON_UDELAY);
                }

                err = -ETIMEDOUT;
        }

out:
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        /* The 3XP seems to need a little extra time to complete the load
         * of the sleep image before we can reliably boot it. Failure to
         * do this occasionally results in a hung adapter after boot in
         * typhoon_init_one() while trying to read the MAC address or
         * putting the card to sleep. 3Com's driver waits 5ms, but
         * that seems to be overkill. However, if we can sleep, we might
         * as well give it that much time. Otherwise, we'll give it 500us,
         * which should be enough (I've see it work well at 100us, but still
         * saw occasional problems.)
         */
        if(wait_type == WaitSleep)
                msleep(5);
        else
                udelay(500);
        return err;
}
448
449 static int
450 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
451 {
452         int i, err = 0;
453
454         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
455                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
456                         goto out;
457                 udelay(TYPHOON_UDELAY);
458         }
459
460         err = -ETIMEDOUT;
461
462 out:
463         return err;
464 }
465
466 static inline void
467 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
468 {
469         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
470                 netif_carrier_off(dev);
471         else
472                 netif_carrier_on(dev);
473 }
474
/* Answer a HELLO request from the 3XP by posting a HELLO_RESP command,
 * unless another command is already being issued (in which case that
 * command serves as proof of life and no response is needed).
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
        struct basic_ring *ring = &tp->cmdRing;
        struct cmd_desc *cmd;

        /* We only get a hello request if we've not sent anything to the
         * card in a long while. If the lock is held, then we're in the
         * process of issuing a command, so we don't need to respond.
         */
        if(spin_trylock(&tp->command_lock)) {
                cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
                typhoon_inc_cmd_index(&ring->lastWrite, 1);

                INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
                /* descriptor must be visible before the doorbell write */
                smp_wmb();
                iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
                spin_unlock(&tp->command_lock);
        }
}
495
/* Drain the response ring.
 * If resp_save is non-NULL, the first sequenced response (resp->seqNo set)
 * is copied into it -- up to resp_size descriptors, handling ring wrap;
 * a response longer than resp_size is flagged with TYPHOON_RESP_ERROR
 * instead of copied. Unsequenced responses are handled in place (media
 * status updates, hello requests); anything else is logged and dropped.
 * Returns nonzero once the awaited response has been captured (trivially
 * nonzero when called with resp_save == NULL).
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
                                struct resp_desc *resp_save)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct resp_desc *resp;
        u8 *base = tp->respRing.ringBase;
        int count, len, wrap_len;
        u32 cleared;
        u32 ready;

        cleared = le32_to_cpu(indexes->respCleared);
        ready = le32_to_cpu(indexes->respReady);
        while(cleared != ready) {
                resp = (struct resp_desc *)(base + cleared);
                count = resp->numDesc + 1;      /* descriptors in this response */
                if(resp_save && resp->seqNo) {
                        if(count > resp_size) {
                                resp_save->flags = TYPHOON_RESP_ERROR;
                                goto cleanup;
                        }

                        /* copy out, splitting at the end of the ring if needed */
                        wrap_len = 0;
                        len = count * sizeof(*resp);
                        if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
                                wrap_len = cleared + len - RESPONSE_RING_SIZE;
                                len = RESPONSE_RING_SIZE - cleared;
                        }

                        memcpy(resp_save, resp, len);
                        if(unlikely(wrap_len)) {
                                resp_save += len / sizeof(*resp);
                                memcpy(resp_save, base, wrap_len);
                        }

                        /* mark the awaited response as consumed */
                        resp_save = NULL;
                } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
                        typhoon_media_status(tp->dev, resp);
                } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
                        typhoon_hello(tp);
                } else {
                        printk(KERN_ERR "%s: dumping unexpected response "
                               "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
                               tp->name, le16_to_cpu(resp->cmd),
                               resp->numDesc, resp->flags,
                               le16_to_cpu(resp->parm1),
                               le32_to_cpu(resp->parm2),
                               le32_to_cpu(resp->parm3));
                }

cleanup:
                typhoon_inc_resp_index(&cleared, count);
        }

        indexes->respCleared = cpu_to_le32(cleared);
        wmb();
        return (resp_save == NULL);
}
554
555 static inline int
556 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
557 {
558         /* this works for all descriptors but rx_desc, as they are a
559          * different size than the cmd_desc -- everyone else is the same
560          */
561         lastWrite /= sizeof(struct cmd_desc);
562         lastRead /= sizeof(struct cmd_desc);
563         return (ringSize + lastRead - lastWrite - 1) % ringSize;
564 }
565
/* Free slots in the command ring */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
        int lastWrite = tp->cmdRing.lastWrite;
        int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

        return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}

/* Free slots in the response ring */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
        int respReady = le32_to_cpu(tp->indexes->respReady);
        int respCleared = le32_to_cpu(tp->indexes->respCleared);

        return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}

/* Free slots in the (low) Tx ring */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
        /* if we start using the Hi Tx ring, this needs updating */
        return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
590
591 static int
592 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
593                       int num_resp, struct resp_desc *resp)
594 {
595         struct typhoon_indexes *indexes = tp->indexes;
596         struct basic_ring *ring = &tp->cmdRing;
597         struct resp_desc local_resp;
598         int i, err = 0;
599         int got_resp;
600         int freeCmd, freeResp;
601         int len, wrap_len;
602
603         spin_lock(&tp->command_lock);
604
605         freeCmd = typhoon_num_free_cmd(tp);
606         freeResp = typhoon_num_free_resp(tp);
607
608         if(freeCmd < num_cmd || freeResp < num_resp) {
609                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
610                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
611                         freeResp, num_resp);
612                 err = -ENOMEM;
613                 goto out;
614         }
615
616         if(cmd->flags & TYPHOON_CMD_RESPOND) {
617                 /* If we're expecting a response, but the caller hasn't given
618                  * us a place to put it, we'll provide one.
619                  */
620                 tp->awaiting_resp = 1;
621                 if(resp == NULL) {
622                         resp = &local_resp;
623                         num_resp = 1;
624                 }
625         }
626
627         wrap_len = 0;
628         len = num_cmd * sizeof(*cmd);
629         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
630                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
631                 len = COMMAND_RING_SIZE - ring->lastWrite;
632         }
633
634         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
635         if(unlikely(wrap_len)) {
636                 struct cmd_desc *wrap_ptr = cmd;
637                 wrap_ptr += len / sizeof(*cmd);
638                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
639         }
640
641         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
642
643         /* "I feel a presence... another warrior is on the mesa."
644          */
645         wmb();
646         iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
647         typhoon_post_pci_writes(tp->ioaddr);
648
649         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
650                 goto out;
651
652         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
653          * preempt or do anything other than take interrupts. So, don't
654          * wait for a response unless you have to.
655          *
656          * I've thought about trying to sleep here, but we're called
657          * from many contexts that don't allow that. Also, given the way
658          * 3Com has implemented irq coalescing, we would likely timeout --
659          * this has been observed in real life!
660          *
661          * The big killer is we have to wait to get stats from the card,
662          * though we could go to a periodic refresh of those if we don't
663          * mind them getting somewhat stale. The rest of the waiting
664          * commands occur during open/close/suspend/resume, so they aren't
665          * time critical. Creating SAs in the future will also have to
666          * wait here.
667          */
668         got_resp = 0;
669         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
670                 if(indexes->respCleared != indexes->respReady)
671                         got_resp = typhoon_process_response(tp, num_resp,
672                                                                 resp);
673                 udelay(TYPHOON_UDELAY);
674         }
675
676         if(!got_resp) {
677                 err = -ETIMEDOUT;
678                 goto out;
679         }
680
681         /* Collect the error response even if we don't care about the
682          * rest of the response
683          */
684         if(resp->flags & TYPHOON_RESP_ERROR)
685                 err = -EIO;
686
687 out:
688         if(tp->awaiting_resp) {
689                 tp->awaiting_resp = 0;
690                 smp_wmb();
691
692                 /* Ugh. If a response was added to the ring between
693                  * the call to typhoon_process_response() and the clearing
694                  * of tp->awaiting_resp, we could have missed the interrupt
695                  * and it could hang in the ring an indeterminate amount of
696                  * time. So, check for it, and interrupt ourselves if this
697                  * is the case.
698                  */
699                 if(indexes->respCleared != indexes->respReady)
700                         iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
701         }
702
703         spin_unlock(&tp->command_lock);
704         return err;
705 }
706
707 static void
708 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
709 {
710         struct typhoon *tp = netdev_priv(dev);
711         struct cmd_desc xp_cmd;
712         int err;
713
714         spin_lock_bh(&tp->state_lock);
715         if(!tp->vlgrp != !grp) {
716                 /* We've either been turned on for the first time, or we've
717                  * been turned off. Update the 3XP.
718                  */
719                 if(grp)
720                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
721                 else
722                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
723
724                 /* If the interface is up, the runtime is running -- and we
725                  * must be up for the vlan core to call us.
726                  *
727                  * Do the command outside of the spin lock, as it is slow.
728                  */
729                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
730                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
731                 xp_cmd.parm2 = tp->offload;
732                 xp_cmd.parm3 = tp->offload;
733                 spin_unlock_bh(&tp->state_lock);
734                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
735                 if(err < 0)
736                         printk("%s: vlan offload error %d\n", tp->name, -err);
737                 spin_lock_bh(&tp->state_lock);
738         }
739
740         /* now make the change visible */
741         tp->vlgrp = grp;
742         spin_unlock_bh(&tp->state_lock);
743 }
744
/* Write a TCP segmentation (TSO) option descriptor for skb at the Tx
 * ring's current write position and advance the write index.
 * @ring_dma is the bus address of the ring base; it is used to compute
 * the bus address of this descriptor's bytesTx field, which is stored
 * in respAddrLo for the NIC to report back to.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
                        u32 ring_dma)
{
        struct tcpopt_desc *tcpd;
        u32 tcpd_offset = ring_dma;

        tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
        tcpd_offset += txRing->lastWrite;
        tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
        typhoon_inc_tx_index(&txRing->lastWrite, 1);

        tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
        tcpd->numDesc = 1;
        tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
        tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
        tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
        tcpd->bytesTx = cpu_to_le32(skb->len);
        tcpd->status = 0;
}
765
/* The hard_start_xmit entry point.  Builds a descriptor chain on the
 * low-priority Tx ring -- one header descriptor, an optional TSO option
 * descriptor, and one fragment descriptor per DMA segment -- then rings
 * the doorbell register to hand the packet to the 3XP.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* tx_addr carries the skb pointer itself, not a DMA address --
	 * presumably recovered on Tx completion to free the skb; confirm
	 * against the completion path.
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* have the firmware insert the VLAN tag on transmit */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers the whole thing */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear header area first... */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		/* ...then one fragment descriptor per paged fragment */
		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
915
/* Program the card's Rx filter from dev->flags and the multicast list.
 * For a bounded multicast list, a 64-bit hash table is built from the
 * CRC of each address and uploaded before the filter command.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < netdev_mc_count(dev);
		     i++, mclist = mclist->next) {
			/* low 6 bits of the CRC select one of 64 hash bits */
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
957
958 static int
959 typhoon_do_get_stats(struct typhoon *tp)
960 {
961         struct net_device_stats *stats = &tp->stats;
962         struct net_device_stats *saved = &tp->stats_saved;
963         struct cmd_desc xp_cmd;
964         struct resp_desc xp_resp[7];
965         struct stats_resp *s = (struct stats_resp *) xp_resp;
966         int err;
967
968         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
969         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
970         if(err < 0)
971                 return err;
972
973         /* 3Com's Linux driver uses txMultipleCollisions as it's
974          * collisions value, but there is some other collision info as well...
975          *
976          * The extra status reported would be a good candidate for
977          * ethtool_ops->get_{strings,stats}()
978          */
979         stats->tx_packets = le32_to_cpu(s->txPackets);
980         stats->tx_bytes = le64_to_cpu(s->txBytes);
981         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
982         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
983         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
984         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
985         stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
986         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
987         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
988                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
989         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
990         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
991         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
992                         SPEED_100 : SPEED_10;
993         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
994                         DUPLEX_FULL : DUPLEX_HALF;
995
996         /* add in the saved statistics
997          */
998         stats->tx_packets += saved->tx_packets;
999         stats->tx_bytes += saved->tx_bytes;
1000         stats->tx_errors += saved->tx_errors;
1001         stats->collisions += saved->collisions;
1002         stats->rx_packets += saved->rx_packets;
1003         stats->rx_bytes += saved->rx_bytes;
1004         stats->rx_fifo_errors += saved->rx_fifo_errors;
1005         stats->rx_errors += saved->rx_errors;
1006         stats->rx_crc_errors += saved->rx_crc_errors;
1007         stats->rx_length_errors += saved->rx_length_errors;
1008
1009         return 0;
1010 }
1011
1012 static struct net_device_stats *
1013 typhoon_get_stats(struct net_device *dev)
1014 {
1015         struct typhoon *tp = netdev_priv(dev);
1016         struct net_device_stats *stats = &tp->stats;
1017         struct net_device_stats *saved = &tp->stats_saved;
1018
1019         smp_rmb();
1020         if(tp->card_state == Sleeping)
1021                 return saved;
1022
1023         if(typhoon_do_get_stats(tp) < 0) {
1024                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1025                 return saved;
1026         }
1027
1028         return stats;
1029 }
1030
1031 static int
1032 typhoon_set_mac_address(struct net_device *dev, void *addr)
1033 {
1034         struct sockaddr *saddr = (struct sockaddr *) addr;
1035
1036         if(netif_running(dev))
1037                 return -EBUSY;
1038
1039         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1040         return 0;
1041 }
1042
1043 static void
1044 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1045 {
1046         struct typhoon *tp = netdev_priv(dev);
1047         struct pci_dev *pci_dev = tp->pdev;
1048         struct cmd_desc xp_cmd;
1049         struct resp_desc xp_resp[3];
1050
1051         smp_rmb();
1052         if(tp->card_state == Sleeping) {
1053                 strcpy(info->fw_version, "Sleep image");
1054         } else {
1055                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1056                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1057                         strcpy(info->fw_version, "Unknown runtime");
1058                 } else {
1059                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1060                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1061                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1062                                  sleep_ver & 0xfff);
1063                 }
1064         }
1065
1066         strcpy(info->driver, DRV_MODULE_NAME);
1067         strcpy(info->version, DRV_MODULE_VERSION);
1068         strcpy(info->bus_info, pci_name(pci_dev));
1069 }
1070
1071 static int
1072 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1073 {
1074         struct typhoon *tp = netdev_priv(dev);
1075
1076         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1077                                 SUPPORTED_Autoneg;
1078
1079         switch (tp->xcvr_select) {
1080         case TYPHOON_XCVR_10HALF:
1081                 cmd->advertising = ADVERTISED_10baseT_Half;
1082                 break;
1083         case TYPHOON_XCVR_10FULL:
1084                 cmd->advertising = ADVERTISED_10baseT_Full;
1085                 break;
1086         case TYPHOON_XCVR_100HALF:
1087                 cmd->advertising = ADVERTISED_100baseT_Half;
1088                 break;
1089         case TYPHOON_XCVR_100FULL:
1090                 cmd->advertising = ADVERTISED_100baseT_Full;
1091                 break;
1092         case TYPHOON_XCVR_AUTONEG:
1093                 cmd->advertising = ADVERTISED_10baseT_Half |
1094                                             ADVERTISED_10baseT_Full |
1095                                             ADVERTISED_100baseT_Half |
1096                                             ADVERTISED_100baseT_Full |
1097                                             ADVERTISED_Autoneg;
1098                 break;
1099         }
1100
1101         if(tp->capabilities & TYPHOON_FIBER) {
1102                 cmd->supported |= SUPPORTED_FIBRE;
1103                 cmd->advertising |= ADVERTISED_FIBRE;
1104                 cmd->port = PORT_FIBRE;
1105         } else {
1106                 cmd->supported |= SUPPORTED_10baseT_Half |
1107                                         SUPPORTED_10baseT_Full |
1108                                         SUPPORTED_TP;
1109                 cmd->advertising |= ADVERTISED_TP;
1110                 cmd->port = PORT_TP;
1111         }
1112
1113         /* need to get stats to make these link speed/duplex valid */
1114         typhoon_do_get_stats(tp);
1115         cmd->speed = tp->speed;
1116         cmd->duplex = tp->duplex;
1117         cmd->phy_address = 0;
1118         cmd->transceiver = XCVR_INTERNAL;
1119         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1120                 cmd->autoneg = AUTONEG_ENABLE;
1121         else
1122                 cmd->autoneg = AUTONEG_DISABLE;
1123         cmd->maxtxpkt = 1;
1124         cmd->maxrxpkt = 1;
1125
1126         return 0;
1127 }
1128
1129 static int
1130 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1131 {
1132         struct typhoon *tp = netdev_priv(dev);
1133         struct cmd_desc xp_cmd;
1134         __le16 xcvr;
1135         int err;
1136
1137         err = -EINVAL;
1138         if(cmd->autoneg == AUTONEG_ENABLE) {
1139                 xcvr = TYPHOON_XCVR_AUTONEG;
1140         } else {
1141                 if(cmd->duplex == DUPLEX_HALF) {
1142                         if(cmd->speed == SPEED_10)
1143                                 xcvr = TYPHOON_XCVR_10HALF;
1144                         else if(cmd->speed == SPEED_100)
1145                                 xcvr = TYPHOON_XCVR_100HALF;
1146                         else
1147                                 goto out;
1148                 } else if(cmd->duplex == DUPLEX_FULL) {
1149                         if(cmd->speed == SPEED_10)
1150                                 xcvr = TYPHOON_XCVR_10FULL;
1151                         else if(cmd->speed == SPEED_100)
1152                                 xcvr = TYPHOON_XCVR_100FULL;
1153                         else
1154                                 goto out;
1155                 } else
1156                         goto out;
1157         }
1158
1159         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1160         xp_cmd.parm1 = xcvr;
1161         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1162         if(err < 0)
1163                 goto out;
1164
1165         tp->xcvr_select = xcvr;
1166         if(cmd->autoneg == AUTONEG_ENABLE) {
1167                 tp->speed = 0xff;       /* invalid */
1168                 tp->duplex = 0xff;      /* invalid */
1169         } else {
1170                 tp->speed = cmd->speed;
1171                 tp->duplex = cmd->duplex;
1172         }
1173
1174 out:
1175         return err;
1176 }
1177
1178 static void
1179 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1180 {
1181         struct typhoon *tp = netdev_priv(dev);
1182
1183         wol->supported = WAKE_PHY | WAKE_MAGIC;
1184         wol->wolopts = 0;
1185         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1186                 wol->wolopts |= WAKE_PHY;
1187         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1188                 wol->wolopts |= WAKE_MAGIC;
1189         memset(&wol->sopass, 0, sizeof(wol->sopass));
1190 }
1191
1192 static int
1193 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1194 {
1195         struct typhoon *tp = netdev_priv(dev);
1196
1197         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1198                 return -EINVAL;
1199
1200         tp->wol_events = 0;
1201         if(wol->wolopts & WAKE_PHY)
1202                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1203         if(wol->wolopts & WAKE_MAGIC)
1204                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1205
1206         return 0;
1207 }
1208
1209 static u32
1210 typhoon_get_rx_csum(struct net_device *dev)
1211 {
1212         /* For now, we don't allow turning off RX checksums.
1213          */
1214         return 1;
1215 }
1216
1217 static void
1218 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1219 {
1220         ering->rx_max_pending = RXENT_ENTRIES;
1221         ering->rx_mini_max_pending = 0;
1222         ering->rx_jumbo_max_pending = 0;
1223         ering->tx_max_pending = TXLO_ENTRIES - 1;
1224
1225         ering->rx_pending = RXENT_ENTRIES;
1226         ering->rx_mini_pending = 0;
1227         ering->rx_jumbo_pending = 0;
1228         ering->tx_pending = TXLO_ENTRIES - 1;
1229 }
1230
/* ethtool operations.  Checksum/SG/TSO setters use the generic helpers;
 * Rx checksumming and ring sizes are fixed, so only getters exist for
 * those.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1244
1245 static int
1246 typhoon_wait_interrupt(void __iomem *ioaddr)
1247 {
1248         int i, err = 0;
1249
1250         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1251                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1252                    TYPHOON_INTR_BOOTCMD)
1253                         goto out;
1254                 udelay(TYPHOON_UDELAY);
1255         }
1256
1257         err = -ETIMEDOUT;
1258
1259 out:
1260         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1261         return err;
1262 }
1263
/* byte offset of a member within the shared host/card memory block */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Fill in the boot record (struct typhoon_interface) inside the shared
 * memory area with the bus addresses and sizes of every ring, and set
 * up the matching host-side ring bookkeeping in @tp.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	/* mark the card asleep; the smp_wmb() orders this store for
	 * lock-free readers such as typhoon_get_stats()
	 */
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1333
1334 static void
1335 typhoon_init_rings(struct typhoon *tp)
1336 {
1337         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1338
1339         tp->txLoRing.lastWrite = 0;
1340         tp->txHiRing.lastWrite = 0;
1341         tp->rxLoRing.lastWrite = 0;
1342         tp->rxHiRing.lastWrite = 0;
1343         tp->rxBuffRing.lastWrite = 0;
1344         tp->cmdRing.lastWrite = 0;
1345         tp->cmdRing.lastWrite = 0;
1346
1347         tp->txLoRing.lastRead = 0;
1348         tp->txHiRing.lastRead = 0;
1349 }
1350
1351 static const struct firmware *typhoon_fw;
1352
1353 static int
1354 typhoon_request_firmware(struct typhoon *tp)
1355 {
1356         const struct typhoon_file_header *fHdr;
1357         const struct typhoon_section_header *sHdr;
1358         const u8 *image_data;
1359         u32 numSections;
1360         u32 section_len;
1361         u32 remaining;
1362         int err;
1363
1364         if (typhoon_fw)
1365                 return 0;
1366
1367         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1368         if (err) {
1369                 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
1370                                 tp->name, FIRMWARE_NAME);
1371                 return err;
1372         }
1373
1374         image_data = (u8 *) typhoon_fw->data;
1375         remaining = typhoon_fw->size;
1376         if (remaining < sizeof(struct typhoon_file_header))
1377                 goto invalid_fw;
1378
1379         fHdr = (struct typhoon_file_header *) image_data;
1380         if (memcmp(fHdr->tag, "TYPHOON", 8))
1381                 goto invalid_fw;
1382
1383         numSections = le32_to_cpu(fHdr->numSections);
1384         image_data += sizeof(struct typhoon_file_header);
1385         remaining -= sizeof(struct typhoon_file_header);
1386
1387         while (numSections--) {
1388                 if (remaining < sizeof(struct typhoon_section_header))
1389                         goto invalid_fw;
1390
1391                 sHdr = (struct typhoon_section_header *) image_data;
1392                 image_data += sizeof(struct typhoon_section_header);
1393                 section_len = le32_to_cpu(sHdr->len);
1394
1395                 if (remaining < section_len)
1396                         goto invalid_fw;
1397
1398                 image_data += section_len;
1399                 remaining -= section_len;
1400         }
1401
1402         return 0;
1403
1404 invalid_fw:
1405         printk(KERN_ERR "%s: Invalid firmware image\n", tp->name);
1406         release_firmware(typhoon_fw);
1407         typhoon_fw = NULL;
1408         return -EINVAL;
1409 }
1410
/* Push the firmware image cached in typhoon_fw into the 3XP, one
 * PAGE_SIZE chunk at a time through a DMA bounce buffer, driving the
 * card's boot-command handshake for each segment.  Returns 0 on success
 * or a negative errno.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* save the interrupt enable/mask state and add the boot-command
	 * bit; both registers are restored at err_out_irq below
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the image's HMAC digest words, then ask it to
	 * accept a runtime image download
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		/* copy the section through the bounce buffer in
		 * PAGE_SIZE chunks, waiting for the card between each
		 */
		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								  dpage, len,
								  0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the interrupt enable/mask state saved above */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1552
1553 static int
1554 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1555 {
1556         void __iomem *ioaddr = tp->ioaddr;
1557
1558         if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1559                 printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
1560                 goto out_timeout;
1561         }
1562
1563         iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1564         iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1565         typhoon_post_pci_writes(ioaddr);
1566         iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1567                                 ioaddr + TYPHOON_REG_COMMAND);
1568
1569         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1570                 printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
1571                        tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
1572                 goto out_timeout;
1573         }
1574
1575         /* Clear the Transmit and Command ready registers
1576          */
1577         iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1578         iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1579         iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1580         typhoon_post_pci_writes(ioaddr);
1581         iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1582
1583         return 0;
1584
1585 out_timeout:
1586         return -ETIMEDOUT;
1587 }
1588
1589 static u32
1590 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1591                         volatile __le32 * index)
1592 {
1593         u32 lastRead = txRing->lastRead;
1594         struct tx_desc *tx;
1595         dma_addr_t skb_dma;
1596         int dma_len;
1597         int type;
1598
1599         while(lastRead != le32_to_cpu(*index)) {
1600                 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1601                 type = tx->flags & TYPHOON_TYPE_MASK;
1602
1603                 if(type == TYPHOON_TX_DESC) {
1604                         /* This tx_desc describes a packet.
1605                          */
1606                         unsigned long ptr = tx->tx_addr;
1607                         struct sk_buff *skb = (struct sk_buff *) ptr;
1608                         dev_kfree_skb_irq(skb);
1609                 } else if(type == TYPHOON_FRAG_DESC) {
1610                         /* This tx_desc describes a memory mapping. Free it.
1611                          */
1612                         skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1613                         dma_len = le16_to_cpu(tx->len);
1614                         pci_unmap_single(tp->pdev, skb_dma, dma_len,
1615                                        PCI_DMA_TODEVICE);
1616                 }
1617
1618                 tx->flags = 0;
1619                 typhoon_inc_tx_index(&lastRead, 1);
1620         }
1621
1622         return lastRead;
1623 }
1624
1625 static void
1626 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1627                         volatile __le32 * index)
1628 {
1629         u32 lastRead;
1630         int numDesc = MAX_SKB_FRAGS + 1;
1631
1632         /* This will need changing if we start to use the Hi Tx ring. */
1633         lastRead = typhoon_clean_tx(tp, txRing, index);
1634         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1635                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1636                 netif_wake_queue(tp->dev);
1637
1638         txRing->lastRead = lastRead;
1639         smp_wmb();
1640 }
1641
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	/* Hand the rx buffer at slot @idx back to the NIC without
	 * reallocating it -- the existing skb and DMA mapping are
	 * reused. If the rx free ring has no room, the skb is dropped
	 * instead.
	 */
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* ring is full when advancing lastWrite would land on the
	 * NIC's cleared index
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();	/* descriptor must be visible before the index update */
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1668
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	/* Allocate a fresh skb for rx buffer slot @idx, DMA map it,
	 * and post it on the rx free ring for the NIC to fill.
	 * Returns 0 on success, -ENOMEM if the free ring is full or
	 * the skb allocation fails.
	 */
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* ring is full when advancing lastWrite would land on the
	 * NIC's cleared index
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();	/* descriptor must be visible before the index update */
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1715
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	/* Process up to @budget received packets from @rxRing, between
	 * our *cleared index and the NIC's *ready index. Packets below
	 * rx_copybreak are copied into a small fresh skb and the
	 * original buffer recycled; larger ones are handed up directly
	 * with a replacement buffer allocated. Returns the number of
	 * packets delivered to the stack.
	 */
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* errored frames are recycled without being counted
		 * against budget or received
		 */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copybreak path: copy into a small skb (the 2
			 * byte reserve aligns the IP header) and give
			 * the original buffer back to the NIC
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand the mapped buffer straight up and try to
			 * put a fresh one in its slot
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* trust the hardware checksum only when the IP check
		 * and exactly one of the TCP/UDP checks passed
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp against concurrent updates */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1795
1796 static void
1797 typhoon_fill_free_ring(struct typhoon *tp)
1798 {
1799         u32 i;
1800
1801         for(i = 0; i < RXENT_ENTRIES; i++) {
1802                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1803                 if(rxb->skb)
1804                         continue;
1805                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1806                         break;
1807         }
1808 }
1809
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	/* NAPI poll: reap command responses and Tx completions, then
	 * process up to @budget Rx packets from the Hi and Lo rings,
	 * refilling the free ring if it ran dry. Interrupts are
	 * unmasked again once we finish under budget.
	 */
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	rmb();	/* see the NIC's index updates before reading them */
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* drain the high priority ring first */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* under budget -- done polling, unmask interrupts */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1850
1851 static irqreturn_t
1852 typhoon_interrupt(int irq, void *dev_instance)
1853 {
1854         struct net_device *dev = dev_instance;
1855         struct typhoon *tp = netdev_priv(dev);
1856         void __iomem *ioaddr = tp->ioaddr;
1857         u32 intr_status;
1858
1859         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1860         if(!(intr_status & TYPHOON_INTR_HOST_INT))
1861                 return IRQ_NONE;
1862
1863         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1864
1865         if (napi_schedule_prep(&tp->napi)) {
1866                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1867                 typhoon_post_pci_writes(ioaddr);
1868                 __napi_schedule(&tp->napi);
1869         } else {
1870                 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1871                        dev->name);
1872         }
1873         return IRQ_HANDLED;
1874 }
1875
1876 static void
1877 typhoon_free_rx_rings(struct typhoon *tp)
1878 {
1879         u32 i;
1880
1881         for(i = 0; i < RXENT_ENTRIES; i++) {
1882                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1883                 if(rxb->skb) {
1884                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1885                                        PCI_DMA_FROMDEVICE);
1886                         dev_kfree_skb(rxb->skb);
1887                         rxb->skb = NULL;
1888                 }
1889         }
1890 }
1891
1892 static int
1893 typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1894 {
1895         struct pci_dev *pdev = tp->pdev;
1896         void __iomem *ioaddr = tp->ioaddr;
1897         struct cmd_desc xp_cmd;
1898         int err;
1899
1900         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1901         xp_cmd.parm1 = events;
1902         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1903         if(err < 0) {
1904                 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
1905                                 tp->name, err);
1906                 return err;
1907         }
1908
1909         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1910         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1911         if(err < 0) {
1912                 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
1913                                 tp->name, err);
1914                 return err;
1915         }
1916
1917         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1918                 return -ETIMEDOUT;
1919
1920         /* Since we cannot monitor the status of the link while sleeping,
1921          * tell the world it went away.
1922          */
1923         netif_carrier_off(tp->dev);
1924
1925         pci_enable_wake(tp->pdev, state, 1);
1926         pci_disable_device(pdev);
1927         return pci_set_power_state(pdev, state);
1928 }
1929
1930 static int
1931 typhoon_wakeup(struct typhoon *tp, int wait_type)
1932 {
1933         struct pci_dev *pdev = tp->pdev;
1934         void __iomem *ioaddr = tp->ioaddr;
1935
1936         pci_set_power_state(pdev, PCI_D0);
1937         pci_restore_state(pdev);
1938
1939         /* Post 2.x.x versions of the Sleep Image require a reset before
1940          * we can download the Runtime Image. But let's not make users of
1941          * the old firmware pay for the reset.
1942          */
1943         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1944         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1945                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1946                 return typhoon_reset(ioaddr, wait_type);
1947
1948         return 0;
1949 }
1950
1951 static int
1952 typhoon_start_runtime(struct typhoon *tp)
1953 {
1954         struct net_device *dev = tp->dev;
1955         void __iomem *ioaddr = tp->ioaddr;
1956         struct cmd_desc xp_cmd;
1957         int err;
1958
1959         typhoon_init_rings(tp);
1960         typhoon_fill_free_ring(tp);
1961
1962         err = typhoon_download_firmware(tp);
1963         if(err < 0) {
1964                 printk("%s: cannot load runtime on 3XP\n", tp->name);
1965                 goto error_out;
1966         }
1967
1968         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1969                 printk("%s: cannot boot 3XP\n", tp->name);
1970                 err = -EIO;
1971                 goto error_out;
1972         }
1973
1974         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1975         xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1976         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1977         if(err < 0)
1978                 goto error_out;
1979
1980         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1981         xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1982         xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1983         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1984         if(err < 0)
1985                 goto error_out;
1986
1987         /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1988          * us some more information on how to control it.
1989          */
1990         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1991         xp_cmd.parm1 = 0;
1992         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1993         if(err < 0)
1994                 goto error_out;
1995
1996         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1997         xp_cmd.parm1 = tp->xcvr_select;
1998         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1999         if(err < 0)
2000                 goto error_out;
2001
2002         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
2003         xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
2004         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2005         if(err < 0)
2006                 goto error_out;
2007
2008         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
2009         spin_lock_bh(&tp->state_lock);
2010         xp_cmd.parm2 = tp->offload;
2011         xp_cmd.parm3 = tp->offload;
2012         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2013         spin_unlock_bh(&tp->state_lock);
2014         if(err < 0)
2015                 goto error_out;
2016
2017         typhoon_set_rx_mode(dev);
2018
2019         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2020         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2021         if(err < 0)
2022                 goto error_out;
2023
2024         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2025         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2026         if(err < 0)
2027                 goto error_out;
2028
2029         tp->card_state = Running;
2030         smp_wmb();
2031
2032         iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2033         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2034         typhoon_post_pci_writes(ioaddr);
2035
2036         return 0;
2037
2038 error_out:
2039         typhoon_reset(ioaddr, WaitNoSleep);
2040         typhoon_free_rx_rings(tp);
2041         typhoon_init_rings(tp);
2042         return err;
2043 }
2044
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	/* Quiesce a running card: disable Rx, drain outstanding Tx,
	 * disable Tx, snapshot the statistics, halt the image and
	 * reset the chip. @wait_type is passed through to
	 * typhoon_reset(). Returns 0 on success or -ETIMEDOUT if the
	 * reset fails.
	 */
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	/* a failed reset is the only hard failure here */
	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2108
2109 static void
2110 typhoon_tx_timeout(struct net_device *dev)
2111 {
2112         struct typhoon *tp = netdev_priv(dev);
2113
2114         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2115                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2116                                         dev->name);
2117                 goto truely_dead;
2118         }
2119
2120         /* If we ever start using the Hi ring, it will need cleaning too */
2121         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2122         typhoon_free_rx_rings(tp);
2123
2124         if(typhoon_start_runtime(tp) < 0) {
2125                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2126                                         dev->name);
2127                 goto truely_dead;
2128         }
2129
2130         netif_wake_queue(dev);
2131         return;
2132
2133 truely_dead:
2134         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2135         typhoon_reset(tp->ioaddr, NoWait);
2136         netif_carrier_off(dev);
2137 }
2138
static int
typhoon_open(struct net_device *dev)
{
	/* ndo_open: wake the card, grab the interrupt and start the
	 * runtime image. On failure, try to drop the card back into
	 * its sleep image so a later open can still succeed.
	 * Returns 0 on success or a negative errno.
	 */
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* reboot into the sleep image before sleeping; if even that
	 * fails, just reset the card and bail
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2188
static int
typhoon_close(struct net_device *dev)
{
	/* ndo_stop: halt the runtime, release the irq and rx buffers,
	 * then put the card back to sleep on its sleep image.
	 * Always returns 0; failures along the way are only logged.
	 */
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2214
2215 #ifdef CONFIG_PM
static int
typhoon_resume(struct pci_dev *pdev)
{
	/* PM resume hook: if the interface was running when we
	 * suspended, wake the card and restart the runtime image.
	 * Returns 0 on success, -EBUSY (after a hard reset) on
	 * failure.
	 */
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2246
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* PM suspend hook: stop the runtime, reboot into the sleep
	 * image, program the MAC address and a minimal rx filter, and
	 * put the card into the requested low power state with the
	 * configured wake events armed. On failure the device is
	 * resumed and -EBUSY returned.
	 */
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* WAKE_MAGIC together with active VLANs is refused outright */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	/* program the MAC address into the freshly booted sleep image */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	/* restrict the rx filter to directed + broadcast while asleep */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2311 #endif
2312
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	/* Probe whether memory mapped I/O works on this board/bridge
	 * combination by raising a self-interrupt through the MMIO
	 * window and checking that it latches. Returns 1 if MMIO is
	 * usable, 0 to fall back to port I/O.
	 */
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* only poke the card if it is still waiting for the host */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore the quiescent interrupt state */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		printk(KERN_INFO PFX "falling back to port IO\n");
	return mode;
}
2358
/* net_device callbacks for the 3CR990 family */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2371
2372 static int __devinit
2373 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2374 {
2375         static int did_version = 0;
2376         struct net_device *dev;
2377         struct typhoon *tp;
2378         int card_id = (int) ent->driver_data;
2379         void __iomem *ioaddr;
2380         void *shared;
2381         dma_addr_t shared_dma;
2382         struct cmd_desc xp_cmd;
2383         struct resp_desc xp_resp[3];
2384         int err = 0;
2385
2386         if(!did_version++)
2387                 printk(KERN_INFO "%s", version);
2388
2389         dev = alloc_etherdev(sizeof(*tp));
2390         if(dev == NULL) {
2391                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2392                        pci_name(pdev));
2393                 err = -ENOMEM;
2394                 goto error_out;
2395         }
2396         SET_NETDEV_DEV(dev, &pdev->dev);
2397
2398         err = pci_enable_device(pdev);
2399         if(err < 0) {
2400                 printk(ERR_PFX "%s: unable to enable device\n",
2401                        pci_name(pdev));
2402                 goto error_out_dev;
2403         }
2404
2405         err = pci_set_mwi(pdev);
2406         if(err < 0) {
2407                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2408                 goto error_out_disable;
2409         }
2410
2411         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2412         if(err < 0) {
2413                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2414                        pci_name(pdev));
2415                 goto error_out_mwi;
2416         }
2417
2418         /* sanity checks on IO and MMIO BARs
2419          */
2420         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2421                 printk(ERR_PFX
2422                        "%s: region #1 not a PCI IO resource, aborting\n",
2423                        pci_name(pdev));
2424                 err = -ENODEV;
2425                 goto error_out_mwi;
2426         }
2427         if(pci_resource_len(pdev, 0) < 128) {
2428                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2429                        pci_name(pdev));
2430                 err = -ENODEV;
2431                 goto error_out_mwi;
2432         }
2433         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2434                 printk(ERR_PFX
2435                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2436                        pci_name(pdev));
2437                 err = -ENODEV;
2438                 goto error_out_mwi;
2439         }
2440         if(pci_resource_len(pdev, 1) < 128) {
2441                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2442                        pci_name(pdev));
2443                 err = -ENODEV;
2444                 goto error_out_mwi;
2445         }
2446
2447         err = pci_request_regions(pdev, "typhoon");
2448         if(err < 0) {
2449                 printk(ERR_PFX "%s: could not request regions\n",
2450                        pci_name(pdev));
2451                 goto error_out_mwi;
2452         }
2453
2454         /* map our registers
2455          */
2456         if(use_mmio != 0 && use_mmio != 1)
2457                 use_mmio = typhoon_test_mmio(pdev);
2458
2459         ioaddr = pci_iomap(pdev, use_mmio, 128);
2460         if (!ioaddr) {
2461                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2462                        pci_name(pdev));
2463                 err = -EIO;
2464                 goto error_out_regions;
2465         }
2466
2467         /* allocate pci dma space for rx and tx descriptor rings
2468          */
2469         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2470                                       &shared_dma);
2471         if(!shared) {
2472                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2473                        pci_name(pdev));
2474                 err = -ENOMEM;
2475                 goto error_out_remap;
2476         }
2477
2478         dev->irq = pdev->irq;
2479         tp = netdev_priv(dev);
2480         tp->shared = (struct typhoon_shared *) shared;
2481         tp->shared_dma = shared_dma;
2482         tp->pdev = pdev;
2483         tp->tx_pdev = pdev;
2484         tp->ioaddr = ioaddr;
2485         tp->tx_ioaddr = ioaddr;
2486         tp->dev = dev;
2487
2488         /* Init sequence:
2489          * 1) Reset the adapter to clear any bad juju
2490          * 2) Reload the sleep image
2491          * 3) Boot the sleep image
2492          * 4) Get the hardware address.
2493          * 5) Put the card to sleep.
2494          */
2495         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2496                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2497                 err = -EIO;
2498                 goto error_out_dma;
2499         }
2500
2501         /* Now that we've reset the 3XP and are sure it's not going to
2502          * write all over memory, enable bus mastering, and save our
2503          * state for resuming after a suspend.
2504          */
2505         pci_set_master(pdev);
2506         pci_save_state(pdev);
2507
2508         /* dev->name is not valid until we register, but we need to
2509          * use some common routines to initialize the card. So that those
2510          * routines print the right name, we keep our oun pointer to the name
2511          */
2512         tp->name = pci_name(pdev);
2513
2514         typhoon_init_interface(tp);
2515         typhoon_init_rings(tp);
2516
2517         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2518                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2519                        pci_name(pdev));
2520                 err = -EIO;
2521                 goto error_out_reset;
2522         }
2523
2524         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2525         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2526                 printk(ERR_PFX "%s: cannot read MAC address\n",
2527                        pci_name(pdev));
2528                 err = -EIO;
2529                 goto error_out_reset;
2530         }
2531
2532         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2533         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2534
2535         if(!is_valid_ether_addr(dev->dev_addr)) {
2536                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2537                        "aborting\n", pci_name(pdev));
2538                 goto error_out_reset;
2539         }
2540
2541         /* Read the Sleep Image version last, so the response is valid
2542          * later when we print out the version reported.
2543          */
2544         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2545         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2546                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2547                         pci_name(pdev));
2548                 goto error_out_reset;
2549         }
2550
2551         tp->capabilities = typhoon_card_info[card_id].capabilities;
2552         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2553
2554         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2555          * READ_VERSIONS command. Those versions are OK after waking up
2556          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2557          * seem to need a little extra help to get started. Since we don't
2558          * know how to nudge it along, just kick it.
2559          */
2560         if(xp_resp[0].numDesc != 0)
2561                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2562
2563         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2564                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2565                        pci_name(pdev));
2566                 err = -EIO;
2567                 goto error_out_reset;
2568         }
2569
2570         /* The chip-specific entries in the device structure. */
2571         dev->netdev_ops         = &typhoon_netdev_ops;
2572         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2573         dev->watchdog_timeo     = TX_TIMEOUT;
2574
2575         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2576
2577         /* We can handle scatter gather, up to 16 entries, and
2578          * we can do IP checksumming (only version 4, doh...)
2579          */
2580         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2581         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2582         dev->features |= NETIF_F_TSO;
2583
2584         if(register_netdev(dev) < 0)
2585                 goto error_out_reset;
2586
2587         /* fixup our local name */
2588         tp->name = dev->name;
2589
2590         pci_set_drvdata(pdev, dev);
2591
2592         printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2593                dev->name, typhoon_card_info[card_id].name,
2594                use_mmio ? "MMIO" : "IO",
2595                (unsigned long long)pci_resource_start(pdev, use_mmio),
2596                dev->dev_addr);
2597
2598         /* xp_resp still contains the response to the READ_VERSIONS command.
2599          * For debugging, let the user know what version he has.
2600          */
2601         if(xp_resp[0].numDesc == 0) {
2602                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2603                  * of version is Month/Day of build.
2604                  */
2605                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2606                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2607                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2608                         monthday & 0xff);
2609         } else if(xp_resp[0].numDesc == 2) {
2610                 /* This is the Typhoon 1.1+ type Sleep Image
2611                  */
2612                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2613                 u8 *ver_string = (u8 *) &xp_resp[1];
2614                 ver_string[25] = 0;
2615                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2616                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2617                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2618                         ver_string);
2619         } else {
2620                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2621                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2622                         le32_to_cpu(xp_resp[0].parm2));
2623         }
2624
2625         return 0;
2626
2627 error_out_reset:
2628         typhoon_reset(ioaddr, NoWait);
2629
2630 error_out_dma:
2631         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2632                             shared, shared_dma);
2633 error_out_remap:
2634         pci_iounmap(pdev, ioaddr);
2635 error_out_regions:
2636         pci_release_regions(pdev);
2637 error_out_mwi:
2638         pci_clear_mwi(pdev);
2639 error_out_disable:
2640         pci_disable_device(pdev);
2641 error_out_dev:
2642         free_netdev(dev);
2643 error_out:
2644         return err;
2645 }
2646
/*
 * typhoon_remove_one - PCI removal callback.
 *
 * Tears down everything typhoon_init_one() established, in reverse
 * order.  The statement sequence is significant: the device must be
 * powered up and its config space restored before the reset, and the
 * register mapping must outlive the reset that uses it.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* Detach from the network stack before touching hardware. */
	unregister_netdev(dev);
	/* The card may be asleep (the probe path leaves it in D3hot);
	 * wake it and restore the config space saved at probe time so
	 * the register accesses below reach a configured device.
	 */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2666
/* PCI driver glue: the supported-device table plus probe/remove, and
 * suspend/resume hooks when power management is configured in.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2677
2678 static int __init
2679 typhoon_init(void)
2680 {
2681         return pci_register_driver(&typhoon_driver);
2682 }
2683
2684 static void __exit
2685 typhoon_cleanup(void)
2686 {
2687         if (typhoon_fw)
2688                 release_firmware(typhoon_fw);
2689         pci_unregister_driver(&typhoon_driver);
2690 }
2691
2692 module_init(typhoon_init);
2693 module_exit(typhoon_cleanup);