[PATCH] chelsio: add 1G switch support
[safe/jmp/linux-2.6] / drivers / net / chelsio / cxgb2.c
1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, write to the Free Software Foundation, Inc.,   *
15  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16  *                                                                           *
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20  *                                                                           *
21  * http://www.chelsio.com                                                    *
22  *                                                                           *
23  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24  * All rights reserved.                                                      *
25  *                                                                           *
26  * Maintainers: maintainers@chelsio.com                                      *
27  *                                                                           *
28  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29  *          Tina Yang               <tainay@chelsio.com>                     *
30  *          Felix Marti             <felix@chelsio.com>                      *
31  *          Scott Bardone           <sbardone@chelsio.com>                   *
32  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33  *          Frank DiMambro          <frank@chelsio.com>                      *
34  *                                                                           *
35  * History:                                                                  *
36  *                                                                           *
37  ****************************************************************************/
38
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
50
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
59
60 #include <linux/workqueue.h>
61
62 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
63 {
64         schedule_delayed_work(&ap->stats_update_task, secs * HZ);
65 }
66
67 static inline void cancel_mac_stats_update(struct adapter *ap)
68 {
69         cancel_delayed_work(&ap->stats_update_task);
70 }
71
/* Upper/lower bounds for the SGE command and free-list ring sizes,
 * enforced by set_sge_param() below. */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES 32

/* Default netif message mask for new adapters. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Core clock selectors passed to t1_clock(). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Human-readable PCI bus speeds, indexed by the mode read from HW. */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
113
114 /*
115  * Setup MAC to receive the types of packets we want.
116  */
117 static void t1_set_rxmode(struct net_device *dev)
118 {
119         struct adapter *adapter = dev->priv;
120         struct cmac *mac = adapter->port[dev->if_port].mac;
121         struct t1_rx_mode rm;
122
123         rm.dev = dev;
124         rm.idx = 0;
125         rm.list = dev->mc_list;
126         mac->ops->set_rx_mode(mac, &rm);
127 }
128
129 static void link_report(struct port_info *p)
130 {
131         if (!netif_carrier_ok(p->dev))
132                 printk(KERN_INFO "%s: link down\n", p->dev->name);
133         else {
134                 const char *s = "10Mbps";
135
136                 switch (p->link_config.speed) {
137                         case SPEED_10000: s = "10Gbps"; break;
138                         case SPEED_1000:  s = "1000Mbps"; break;
139                         case SPEED_100:   s = "100Mbps"; break;
140                 }
141
142         printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
143                        p->dev->name, s,
144                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
145         }
146 }
147
148 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
149                         int speed, int duplex, int pause)
150 {
151         struct port_info *p = &adapter->port[port_id];
152
153         if (link_stat != netif_carrier_ok(p->dev)) {
154                 if (link_stat)
155                         netif_carrier_on(p->dev);
156                 else
157                         netif_carrier_off(p->dev);
158                 link_report(p);
159
160                 /* multi-ports: inform toe */
161                 if ((speed > 0) && (adapter->params.nports > 1)) {
162                         unsigned int sched_speed = 10;
163                         switch (speed) {
164                         case SPEED_1000:
165                                 sched_speed = 1000;
166                                 break;
167                         case SPEED_100:
168                                 sched_speed = 100;
169                                 break;
170                         case SPEED_10:
171                                 sched_speed = 10;
172                                 break;
173                         }
174                         t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
175                 }
176         }
177 }
178
/*
 * Bring up the MAC for one port: reset it, reprogram the station
 * address (when the MAC supports it), restore the RX mode, start
 * PHY link negotiation, and finally enable RX and TX.  The order of
 * these steps is preserved as-is; reset must come first.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
190
/*
 * Enable the TP's checksum offload engines: IP header checksumming
 * only on TSO-capable hardware, UDP checksumming only where the
 * adapter advertises the capability, TCP checksumming always.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
199
200 /*
201  * Things to do upon first use of a card.
202  * This must run with the rtnl lock held.
203  */
204 static int cxgb_up(struct adapter *adapter)
205 {
206         int err = 0;
207
208         if (!(adapter->flags & FULL_INIT_DONE)) {
209                 err = t1_init_hw_modules(adapter);
210                 if (err)
211                         goto out_err;
212
213                 enable_hw_csum(adapter);
214                 adapter->flags |= FULL_INIT_DONE;
215         }
216
217         t1_interrupts_clear(adapter);
218         if ((err = request_irq(adapter->pdev->irq,
219                                t1_select_intr_handler(adapter), IRQF_SHARED,
220                                adapter->name, adapter))) {
221                 goto out_err;
222         }
223         t1_sge_start(adapter->sge);
224         t1_interrupts_enable(adapter);
225  out_err:
226         return err;
227 }
228
229 /*
230  * Release resources when all the ports have been stopped.
231  */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);		/* quiesce DMA first */
	t1_interrupts_disable(adapter);		/* then silence the chip */
	free_irq(adapter->pdev->irq, adapter);	/* safe to release the IRQ now */
}
238
239 static int cxgb_open(struct net_device *dev)
240 {
241         int err;
242         struct adapter *adapter = dev->priv;
243         int other_ports = adapter->open_device_map & PORT_MASK;
244
245         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
246                 return err;
247
248         __set_bit(dev->if_port, &adapter->open_device_map);
249         link_start(&adapter->port[dev->if_port]);
250         netif_start_queue(dev);
251         if (!other_ports && adapter->params.stats_update_period)
252                 schedule_mac_stats_update(adapter,
253                                           adapter->params.stats_update_period);
254         return 0;
255 }
256
/*
 * Stop one port.  When it is the last open port, also cancel the MAC
 * statistics task and tear the adapter down.  The barrier plus the
 * empty lock/unlock pair ensure a statistics task that observed the
 * port as still open has finished before the work item is cancelled.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	/* Last port closed: release IRQ and stop the SGE. */
	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
281
282 static struct net_device_stats *t1_get_stats(struct net_device *dev)
283 {
284         struct adapter *adapter = dev->priv;
285         struct port_info *p = &adapter->port[dev->if_port];
286         struct net_device_stats *ns = &p->netstats;
287         const struct cmac_statistics *pstats;
288
289         /* Do a full update of the MAC stats */
290         pstats = p->mac->ops->statistics_update(p->mac,
291                                                 MAC_STATS_UPDATE_FULL);
292
293         ns->tx_packets = pstats->TxUnicastFramesOK +
294                 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
295
296         ns->rx_packets = pstats->RxUnicastFramesOK +
297                 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
298
299         ns->tx_bytes = pstats->TxOctetsOK;
300         ns->rx_bytes = pstats->RxOctetsOK;
301
302         ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
303                 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
304         ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
305                 pstats->RxFCSErrors + pstats->RxAlignErrors +
306                 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
307                 pstats->RxSymbolErrors + pstats->RxRuntErrors;
308
309         ns->multicast  = pstats->RxMulticastFramesOK;
310         ns->collisions = pstats->TxTotalCollisions;
311
312         /* detailed rx_errors */
313         ns->rx_length_errors = pstats->RxFrameTooLongErrors +
314                 pstats->RxJabberErrors;
315         ns->rx_over_errors   = 0;
316         ns->rx_crc_errors    = pstats->RxFCSErrors;
317         ns->rx_frame_errors  = pstats->RxAlignErrors;
318         ns->rx_fifo_errors   = 0;
319         ns->rx_missed_errors = 0;
320
321         /* detailed tx_errors */
322         ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
323         ns->tx_carrier_errors   = 0;
324         ns->tx_fifo_errors      = pstats->TxUnderrun;
325         ns->tx_heartbeat_errors = 0;
326         ns->tx_window_errors    = pstats->TxLateCollisions;
327         return ns;
328 }
329
330 static u32 get_msglevel(struct net_device *dev)
331 {
332         struct adapter *adapter = dev->priv;
333
334         return adapter->msg_enable;
335 }
336
337 static void set_msglevel(struct net_device *dev, u32 val)
338 {
339         struct adapter *adapter = dev->priv;
340
341         adapter->msg_enable = val;
342 }
343
344 static char stats_strings[][ETH_GSTRING_LEN] = {
345         "TxOctetsOK",
346         "TxOctetsBad",
347         "TxUnicastFramesOK",
348         "TxMulticastFramesOK",
349         "TxBroadcastFramesOK",
350         "TxPauseFrames",
351         "TxFramesWithDeferredXmissions",
352         "TxLateCollisions",
353         "TxTotalCollisions",
354         "TxFramesAbortedDueToXSCollisions",
355         "TxUnderrun",
356         "TxLengthErrors",
357         "TxInternalMACXmitError",
358         "TxFramesWithExcessiveDeferral",
359         "TxFCSErrors",
360
361         "RxOctetsOK",
362         "RxOctetsBad",
363         "RxUnicastFramesOK",
364         "RxMulticastFramesOK",
365         "RxBroadcastFramesOK",
366         "RxPauseFrames",
367         "RxFCSErrors",
368         "RxAlignErrors",
369         "RxSymbolErrors",
370         "RxDataErrors",
371         "RxSequenceErrors",
372         "RxRuntErrors",
373         "RxJabberErrors",
374         "RxInternalMACRcvError",
375         "RxInRangeLengthErrors",
376         "RxOutOfRangeLengthField",
377         "RxFrameTooLongErrors",
378
379         "TSO",
380         "VLANextractions",
381         "VLANinsertions",
382         "RxCsumGood",
383         "TxCsumOffload",
384         "RxDrops"
385
386         "respQ_empty",
387         "respQ_overflow",
388         "freelistQ_empty",
389         "pkt_too_big",
390         "pkt_mismatch",
391         "cmdQ_full0",
392         "cmdQ_full1",
393         "tx_ipfrags",
394         "tx_reg_pkts",
395         "tx_lso_pkts",
396         "tx_do_cksum",
397
398         "espi_DIP2ParityErr",
399         "espi_DIP4Err",
400         "espi_RxDrops",
401         "espi_TxDrops",
402         "espi_RxOvfl",
403         "espi_ParityErr"
404 };
405
406 #define T2_REGMAP_SIZE (3 * 1024)
407
/* ethtool: size in bytes of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
412
413 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
414 {
415         struct adapter *adapter = dev->priv;
416
417         strcpy(info->driver, DRV_NAME);
418         strcpy(info->version, DRV_VERSION);
419         strcpy(info->fw_version, "N/A");
420         strcpy(info->bus_info, pci_name(adapter->pdev));
421 }
422
/* ethtool: number of statistics strings/values we expose. */
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}
427
428 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
429 {
430         if (stringset == ETH_SS_STATS)
431                 memcpy(data, stats_strings, sizeof(stats_strings));
432 }
433
/*
 * ethtool: emit all statistics values.  The order here MUST match
 * stats_strings[] exactly: MAC counters, then per-port SGE counters,
 * then SGE interrupt counters, then ESPI counters.
 * NOTE(review): when adapter->espi is NULL the six ESPI slots are left
 * unwritten although stats_strings[] still names them — verify callers
 * always zero the buffer or that espi is always present on these chips.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_port_stats *ss;
	const struct sge_intr_counts *t;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
	t = t1_sge_get_intr_counts(adapter->sge);

	/* MAC TX counters */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;

	/* MAC RX counters */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;

	/* per-port SGE counters */
	*data++ = ss->tso;
	*data++ = ss->vlan_xtract;
	*data++ = ss->vlan_insert;
	*data++ = ss->rx_cso_good;
	*data++ = ss->tx_cso;
	*data++ = ss->rx_drops;

	/* SGE interrupt counters */
	*data++ = (u64)t->respQ_empty;
	*data++ = (u64)t->respQ_overflow;
	*data++ = (u64)t->freelistQ_empty;
	*data++ = (u64)t->pkt_too_big;
	*data++ = (u64)t->pkt_mismatch;
	*data++ = (u64)t->cmdQ_full[0];
	*data++ = (u64)t->cmdQ_full[1];
	*data++ = (u64)t->tx_ipfrags;
	*data++ = (u64)t->tx_reg_pkts;
	*data++ = (u64)t->tx_lso_pkts;
	*data++ = (u64)t->tx_do_cksum;

	/* ESPI counters (only on adapters with an ESPI block) */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = (u64) e->DIP2_parity_err;
		*data++ = (u64) e->DIP4_err;
		*data++ = (u64) e->rx_drops;
		*data++ = (u64) e->tx_drops;
		*data++ = (u64) e->rx_ovflw;
		*data++ = (u64) e->parity_err;
	}
}
512
/*
 * Copy the contiguous register range [start, end] from BAR0 into the
 * snapshot buffer at the same byte offsets (@buf points at offset 0 of
 * the full map, so the destination is buf + start).
 * NOTE(review): `buf + start` is arithmetic on void *, a GCC extension.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}
521
/*
 * ethtool: dump the interesting register blocks (SGE, MC3/MC4, TPI,
 * TP, RAT, CSPI, ESPI, ULP, PL, MC5) into a zeroed T2_REGMAP_SIZE
 * buffer; gaps between blocks stay zero.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
544
545 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
546 {
547         struct adapter *adapter = dev->priv;
548         struct port_info *p = &adapter->port[dev->if_port];
549
550         cmd->supported = p->link_config.supported;
551         cmd->advertising = p->link_config.advertising;
552
553         if (netif_carrier_ok(dev)) {
554                 cmd->speed = p->link_config.speed;
555                 cmd->duplex = p->link_config.duplex;
556         } else {
557                 cmd->speed = -1;
558                 cmd->duplex = -1;
559         }
560
561         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
562         cmd->phy_address = p->phy->addr;
563         cmd->transceiver = XCVR_EXTERNAL;
564         cmd->autoneg = p->link_config.autoneg;
565         cmd->maxtxpkt = 0;
566         cmd->maxrxpkt = 0;
567         return 0;
568 }
569
570 static int speed_duplex_to_caps(int speed, int duplex)
571 {
572         int cap = 0;
573
574         switch (speed) {
575         case SPEED_10:
576                 if (duplex == DUPLEX_FULL)
577                         cap = SUPPORTED_10baseT_Full;
578                 else
579                         cap = SUPPORTED_10baseT_Half;
580                 break;
581         case SPEED_100:
582                 if (duplex == DUPLEX_FULL)
583                         cap = SUPPORTED_100baseT_Full;
584                 else
585                         cap = SUPPORTED_100baseT_Half;
586                 break;
587         case SPEED_1000:
588                 if (duplex == DUPLEX_FULL)
589                         cap = SUPPORTED_1000baseT_Full;
590                 else
591                         cap = SUPPORTED_1000baseT_Half;
592                 break;
593         case SPEED_10000:
594                 if (duplex == DUPLEX_FULL)
595                         cap = SUPPORTED_10000baseT_Full;
596         }
597         return cap;
598 }
599
600 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
601                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
602                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
603                       ADVERTISED_10000baseT_Full)
604
/*
 * ethtool: change speed/duplex/autoneg settings.
 * With autoneg disabled the requested fixed speed must be supported;
 * forcing 1Gbps is rejected outright (presumably these PHYs require
 * autoneg at gigabit — NOTE(review): confirm against PHY docs).
 * With autoneg enabled, a mask with more than one advertised mode is
 * replaced by everything we support, then intersected with supported.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;		/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* (x & (x - 1)) != 0 <=> more than one bit set */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	/* Apply immediately if the interface is up. */
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
638
639 static void get_pauseparam(struct net_device *dev,
640                            struct ethtool_pauseparam *epause)
641 {
642         struct adapter *adapter = dev->priv;
643         struct port_info *p = &adapter->port[dev->if_port];
644
645         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
646         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
647         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
648 }
649
/*
 * ethtool: change flow-control settings.  Pause autonegotiation can
 * only be requested on ports that support link autoneg.  When autoneg
 * is active the new setting is applied by restarting negotiation;
 * otherwise it is forced directly on the MAC.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* -1/-1 keeps the current speed and duplex unchanged. */
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
679
680 static u32 get_rx_csum(struct net_device *dev)
681 {
682         struct adapter *adapter = dev->priv;
683
684         return (adapter->flags & RX_CSUM_ENABLED) != 0;
685 }
686
687 static int set_rx_csum(struct net_device *dev, u32 data)
688 {
689         struct adapter *adapter = dev->priv;
690
691         if (data)
692                 adapter->flags |= RX_CSUM_ENABLED;
693         else
694                 adapter->flags &= ~RX_CSUM_ENABLED;
695         return 0;
696 }
697
698 static int set_tso(struct net_device *dev, u32 value)
699 {
700         struct adapter *adapter = dev->priv;
701
702         if (!(adapter->flags & TSO_CAPABLE))
703                 return value ? -EOPNOTSUPP : 0;
704         return ethtool_op_set_tso(dev, value);
705 }
706
707 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
708 {
709         struct adapter *adapter = dev->priv;
710         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
711
712         e->rx_max_pending = MAX_RX_BUFFERS;
713         e->rx_mini_max_pending = 0;
714         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
715         e->tx_max_pending = MAX_CMDQ_ENTRIES;
716
717         e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
718         e->rx_mini_pending = 0;
719         e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
720         e->tx_pending = adapter->params.sge.cmdQ_size[0];
721 }
722
723 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
724 {
725         struct adapter *adapter = dev->priv;
726         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
727
728         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
729             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
730             e->tx_pending > MAX_CMDQ_ENTRIES ||
731             e->rx_pending < MIN_FL_ENTRIES ||
732             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
733             e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
734                 return -EINVAL;
735
736         if (adapter->flags & FULL_INIT_DONE)
737         return -EBUSY;
738
739         adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
740         adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
741         adapter->params.sge.cmdQ_size[0] = e->tx_pending;
742         adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
743                 MAX_CMDQ1_ENTRIES : e->tx_pending;
744         return 0;
745 }
746
747 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
748 {
749         struct adapter *adapter = dev->priv;
750
751         /*
752          * If RX coalescing is requested we use NAPI, otherwise interrupts.
753          * This choice can be made only when all ports and the TOE are off.
754          */
755         if (adapter->open_device_map == 0)
756                 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
757
758         if (adapter->params.sge.polling) {
759                 adapter->params.sge.rx_coalesce_usecs = 0;
760         } else {
761                 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
762         }
763         adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
764         adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
765         t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
766         return 0;
767 }
768
769 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
770 {
771         struct adapter *adapter = dev->priv;
772
773         c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
774         c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
775         c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
776         return 0;
777 }
778
779 static int get_eeprom_len(struct net_device *dev)
780 {
781         struct adapter *adapter = dev->priv;
782
783         return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
784 }
785
/* Magic value identifying this device's EEPROM contents to ethtool. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool: read EEPROM contents.  Reads whole 32-bit words covering
 * [offset, offset + len) into an aligned bounce buffer, then copies
 * out the exact requested byte range.
 * NOTE(review): no local bounds check on buf[i] — relies on the
 * ethtool core clamping e->offset/e->len to get_eeprom_len(); verify.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
802
/* ethtool operation table; hooked up to net devices at probe time. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.get_tx_csum       = ethtool_op_get_tx_csum,
	.set_tx_csum       = ethtool_op_set_tx_csum,
	.get_sg            = ethtool_op_get_sg,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_stats_count   = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_tso           = ethtool_op_get_tso,
	.set_tso           = set_tso,
};
832
833 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
834 {
835         struct adapter *adapter = dev->priv;
836         struct mii_ioctl_data *data = if_mii(req);
837
838         switch (cmd) {
839         case SIOCGMIIPHY:
840                 data->phy_id = adapter->port[dev->if_port].phy->addr;
841                 /* FALLTHRU */
842         case SIOCGMIIREG: {
843                 struct cphy *phy = adapter->port[dev->if_port].phy;
844                 u32 val;
845
846                 if (!phy->mdio_read)
847             return -EOPNOTSUPP;
848                 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
849                                &val);
850                 data->val_out = val;
851                 break;
852         }
853         case SIOCSMIIREG: {
854                 struct cphy *phy = adapter->port[dev->if_port].phy;
855
856                 if (!capable(CAP_NET_ADMIN))
857                     return -EPERM;
858                 if (!phy->mdio_write)
859             return -EOPNOTSUPP;
860                 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
861                                 data->val_in);
862                 break;
863         }
864
865         default:
866                 return -EOPNOTSUPP;
867         }
868         return 0;
869 }
870
871 static int t1_change_mtu(struct net_device *dev, int new_mtu)
872 {
873         int ret;
874         struct adapter *adapter = dev->priv;
875         struct cmac *mac = adapter->port[dev->if_port].mac;
876
877         if (!mac->ops->set_mtu)
878         return -EOPNOTSUPP;
879         if (new_mtu < 68)
880         return -EINVAL;
881         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
882                 return ret;
883         dev->mtu = new_mtu;
884         return 0;
885 }
886
887 static int t1_set_mac_addr(struct net_device *dev, void *p)
888 {
889         struct adapter *adapter = dev->priv;
890         struct cmac *mac = adapter->port[dev->if_port].mac;
891         struct sockaddr *addr = p;
892
893         if (!mac->ops->macaddress_set)
894                 return -EOPNOTSUPP;
895
896         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
897         mac->ops->macaddress_set(mac, dev->dev_addr);
898         return 0;
899 }
900
901 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
902 static void vlan_rx_register(struct net_device *dev,
903                                    struct vlan_group *grp)
904 {
905         struct adapter *adapter = dev->priv;
906
907         spin_lock_irq(&adapter->async_lock);
908         adapter->vlan_grp = grp;
909         t1_set_vlan_accel(adapter, grp != NULL);
910         spin_unlock_irq(&adapter->async_lock);
911 }
912
913 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
914 {
915         struct adapter *adapter = dev->priv;
916
917         spin_lock_irq(&adapter->async_lock);
918         if (adapter->vlan_grp)
919                 adapter->vlan_grp->vlan_devices[vid] = NULL;
920         spin_unlock_irq(&adapter->async_lock);
921 }
922 #endif
923
924 #ifdef CONFIG_NET_POLL_CONTROLLER
925 static void t1_netpoll(struct net_device *dev)
926 {
927         unsigned long flags;
928         struct adapter *adapter = dev->priv;
929
930         local_irq_save(flags);
931         t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
932         local_irq_restore(flags);
933 }
934 #endif
935
936 /*
937  * Periodic accumulation of MAC statistics.  This is used only if the MAC
938  * does not have any other way to prevent stats counter overflow.
939  */
940 static void mac_stats_task(void *data)
941 {
942         int i;
943         struct adapter *adapter = data;
944
945         for_each_port(adapter, i) {
946                 struct port_info *p = &adapter->port[i];
947
948                 if (netif_running(p->dev))
949                         p->mac->ops->statistics_update(p->mac,
950                                                        MAC_STATS_UPDATE_FAST);
951         }
952
953         /* Schedule the next statistics update if any port is active. */
954         spin_lock(&adapter->work_lock);
955         if (adapter->open_device_map & PORT_MASK)
956                 schedule_mac_stats_update(adapter,
957                                           adapter->params.stats_update_period);
958         spin_unlock(&adapter->work_lock);
959 }
960
961 /*
962  * Processes elmer0 external interrupts in process context.
963  */
static void ext_intr_task(void *data)
{
	struct adapter *adapter = data;

	/* Service the elmer0 sources that raised the interrupt. */
	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Ack the cause bit first, then unmask -- this order avoids an
	 * immediate re-interrupt from the already-handled event. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
978
979 /*
980  * Interrupt-context handler for elmer0 external interrupts.
981  */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	/* SGE data interrupts are always kept enabled in A_PL_ENABLE,
	 * independent of the slow-path mask. */
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
994
/*
 * Fatal-error handler: quiesce the hardware (SGE and interrupts) if the
 * adapter was fully initialized, then log the condition.  The device is
 * left suspended; no recovery is attempted here.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
1004
/*
 * PCI probe: bring up one adapter and create one net_device per port.
 *
 * The shared adapter structure is piggybacked on the first port's
 * net_device (alloc_etherdev() is given sizeof(*adapter) only for the
 * first allocation); every port's netdev->priv then points at it.
 * Failures unwind through the labels at the bottom.  Registration of the
 * netdevs is best-effort: the probe succeeds as long as at least one
 * port registers.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	/* Print the driver banner once, on the first probed device. */
	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA (both streaming and consistent); otherwise
	 * fall back to a 32-bit mask or fail the probe. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for"
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first netdev carries the adapter structure. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			/* First iteration: set up the shared adapter state. */
			adapter = netdev->priv;
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;    /* Can't handle this chip rev */
				goto out_free_dev;
			}

			/* Use the PCI name for messages until a netdev
			 * registers successfully (renamed further below). */
			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			/* Deferred work: slow external interrupts and the
			 * periodic MAC statistics accumulation. */
			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task, adapter);
			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
				  adapter);

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->vlan_rx_register = vlan_rx_register;
			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				adapter->flags |= TSO_CAPABLE;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t1_start_xmit;
		/* Reserve headroom for the CPL command prepended to each
		 * packet (larger when TSO is in use). */
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
		netdev->get_stats = t1_get_stats;
		netdev->set_multicast_list = t1_set_rxmode;
		netdev->do_ioctl = t1_ioctl;
		netdev->change_mtu = t1_change_mtu;
		netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = t1_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* If nothing registered, err still holds the last (non-zero)
	 * register_netdev() result and is returned below. */
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

 out_release_adapter_res:
	t1_free_sw_modules(adapter);
 out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		/* Free every netdev allocated so far; port 0's netdev also
		 * holds the adapter, so it must go last. */
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
 out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1221
1222 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1223 {
1224         int data;
1225         int i;
1226         u32 val;
1227
1228         enum {
1229                 S_CLOCK = 1 << 3,
1230                 S_DATA = 1 << 4
1231         };
1232
1233         for (i = (nbits - 1); i > -1; i--) {
1234
1235                 udelay(50);
1236
1237                 data = ((bitdata >> i) & 0x1);
1238                 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1239
1240                 if (data)
1241                         val |= S_DATA;
1242                 else
1243                         val &= ~S_DATA;
1244
1245                 udelay(50);
1246
1247                 /* Set SCLOCK low */
1248                 val &= ~S_CLOCK;
1249                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1250
1251                 udelay(50);
1252
1253                 /* Write SCLOCK high */
1254                 val |= S_CLOCK;
1255                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1256
1257         }
1258 }
1259
/*
 * Reprogram the T1B core and memory clock synthesizers.
 *
 * Bit 0 of `mode` selects HCLOCK (full speed) vs LCLOCK (powersave);
 * bit 1 means "query only" and returns without touching hardware.  The
 * synthesizer values are shifted out serially over the ELMER0 GPO pins
 * via bit_bang(), first for the ASIC core and then for memory, under
 * tpi_lock so nothing else interleaves TPI accesses mid-stream.
 *
 * Returns 0 on success, -ENODEV for non-T1B chips (cannot be re-clocked),
 * -EALREADY if the requested mode is already active.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	/* Synthesizer field widths/values and the GPO control bits used
	 * during the serial transfer.  NOTE(review): the M/N/T divider
	 * semantics come from the external clock chip's datasheet, which
	 * is not visible here -- do not change without it. */
	enum {
		M_CORE_BITS = 9,
		T_CORE_VAL = 0,
		T_CORE_BITS = 2,
		N_CORE_VAL = 0,
		N_CORE_BITS = 2,
		M_MEM_BITS = 9,
		T_MEM_VAL = 0,
		T_MEM_BITS = 2,
		N_MEM_VAL = 0,
		N_MEM_BITS = 2,
		NP_LOAD = 1 << 17,
		S_LOAD_MEM = 1 << 5,
		S_LOAD_CORE = 1 << 6,
		S_CLOCK = 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV; /* Can't re-clock this chip. */

	if (mode & 2) {
		return 0;       /* show current mode. */
	}

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;       /* ASIC already running in mode. */

	/* Choose the M divider values for the requested speed and record
	 * the new mode before programming. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;  /* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;  /* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1370
/*
 * Software-reset the chip by writing 3 then 0 to its PCI power-management
 * control/status register.  NOTE(review): presumably a D3hot -> D0
 * transition whose side effect resets the ASIC -- confirm against the
 * A_PCICFG_PM_CSR register definition.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1376
/*
 * PCI remove: undo init_one() in reverse order -- unregister the netdevs
 * that made it into the stack, release software modules and the MMIO
 * mapping, free all netdevs (port 0's netdev, freed last, also holds the
 * adapter structure), then release PCI resources and soft-reset the chip.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i].dev);

		t1_free_sw_modules(adapter);
		iounmap(adapter->regs);
		/* NOTE(review): relies on for_each_port() leaving i at the
		 * port count so this walks all ports backwards -- verify
		 * against the macro's definition. */
		while (--i >= 0)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		t1_sw_reset(pdev);
	}
}
1401
/* PCI driver glue: device ID table plus the probe/remove entry points. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1408
/* Module entry point: register the PCI driver with the core. */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}
1413
/* Module exit point: unregister the PCI driver (triggers remove_one). */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}
1418
1419 module_init(t1_init_module);
1420 module_exit(t1_cleanup_module);