[PATCH] b44: fix manual speed/duplex/autoneg settings
[safe/jmp/linux-2.6] / drivers / net / b44.c
1 /* b44.c: Broadcom 4400 device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2006 Broadcom Corporation.
6  *
7  * Distribute under GPL.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
23
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
27
28 #include "b44.h"
29
30 #define DRV_MODULE_NAME         "b44"
31 #define PFX DRV_MODULE_NAME     ": "
32 #define DRV_MODULE_VERSION      "1.00"
33 #define DRV_MODULE_RELDATE      "Apr 7, 2006"
34
35 #define B44_DEF_MSG_ENABLE        \
36         (NETIF_MSG_DRV          | \
37          NETIF_MSG_PROBE        | \
38          NETIF_MSG_LINK         | \
39          NETIF_MSG_TIMER        | \
40          NETIF_MSG_IFDOWN       | \
41          NETIF_MSG_IFUP         | \
42          NETIF_MSG_RX_ERR       | \
43          NETIF_MSG_TX_ERR)
44
45 /* length of time before we decide the hardware is borked,
46  * and dev->tx_timeout() should be called to fix the problem
47  */
48 #define B44_TX_TIMEOUT                  (5 * HZ)
49
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU                     60
52 #define B44_MAX_MTU                     1500
53
54 #define B44_RX_RING_SIZE                512
55 #define B44_DEF_RX_RING_PENDING         200
56 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
57                                  B44_RX_RING_SIZE)
58 #define B44_TX_RING_SIZE                512
59 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
61                                  B44_TX_RING_SIZE)
62 #define B44_DMA_MASK 0x3fffffff
63
64 #define TX_RING_GAP(BP) \
65         (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP)                                              \
67         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
68           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
69           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
71
72 #define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)
74
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
77
78 static char version[] __devinitdata =
79         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
80
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
85
86 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
89
/* PCI IDs claimed by this driver: the BCM4401 plus its B0 and B1
 * revisions.  Each entry matches any subsystem vendor/device.
 */
static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};
99
100 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
101
102 static void b44_halt(struct b44 *);
103 static void b44_init_rings(struct b44 *);
104 static void b44_init_hw(struct b44 *);
105
106 static int dma_desc_align_mask;
107 static int dma_desc_sync_size;
108
/* ethtool statistics name strings, expanded from B44_STAT_REG_DECLARE
 * (defined in b44.h) so the names stay in sync with the hw_stats
 * counter layout.
 */
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};
114
115 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
116                                                 dma_addr_t dma_base,
117                                                 unsigned long offset,
118                                                 enum dma_data_direction dir)
119 {
120         dma_sync_single_range_for_device(&pdev->dev, dma_base,
121                                          offset & dma_desc_align_mask,
122                                          dma_desc_sync_size, dir);
123 }
124
125 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
126                                              dma_addr_t dma_base,
127                                              unsigned long offset,
128                                              enum dma_data_direction dir)
129 {
130         dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131                                       offset & dma_desc_align_mask,
132                                       dma_desc_sync_size, dir);
133 }
134
/* Read a 32-bit chip register at byte offset @reg from the MMIO base. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}
139
/* Write @val to the 32-bit chip register at byte offset @reg.
 * Note: writel() may be posted; callers that need the write to have
 * reached the chip follow it with a br32() read-back.
 */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}
145
146 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147                         u32 bit, unsigned long timeout, const int clear)
148 {
149         unsigned long i;
150
151         for (i = 0; i < timeout; i++) {
152                 u32 val = br32(bp, reg);
153
154                 if (clear && !(val & bit))
155                         break;
156                 if (!clear && (val & bit))
157                         break;
158                 udelay(10);
159         }
160         if (i == timeout) {
161                 printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
162                        "%lx to %s.\n",
163                        bp->dev->name,
164                        bit, reg,
165                        (clear ? "clear" : "set"));
166                 return -ENODEV;
167         }
168         return 0;
169 }
170
171 /* Sonics SiliconBackplane support routines.  ROFL, you should see all the
172  * buzz words used on this company's website :-)
173  *
174  * All of these routines must be invoked with bp->lock held and
175  * interrupts disabled.
176  */
177
178 #define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
179 #define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */
180
181 static u32 ssb_get_core_rev(struct b44 *bp)
182 {
183         return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
184 }
185
186 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
187 {
188         u32 bar_orig, pci_rev, val;
189
190         pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192         pci_rev = ssb_get_core_rev(bp);
193
194         val = br32(bp, B44_SBINTVEC);
195         val |= cores;
196         bw32(bp, B44_SBINTVEC, val);
197
198         val = br32(bp, SSB_PCI_TRANS_2);
199         val |= SSB_PCI_PREF | SSB_PCI_BURST;
200         bw32(bp, SSB_PCI_TRANS_2, val);
201
202         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
203
204         return pci_rev;
205 }
206
/* Put the ethernet core into reset.
 *
 * No-op if the core is already held in reset.  Otherwise: assert
 * REJECT with the clock still running, wait for the reject to take
 * and for the core to go idle, pulse RESET with a forced-gated clock
 * (FGC), and finally drop the clock entirely.  The dummy br32() reads
 * flush posted writes before each udelay() settle time.
 * Caller must hold bp->lock with interrupts disabled.
 */
static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
223
/* Reset and re-enable the ethernet core.
 *
 * Disables the core first, then brings it out of reset with a forced
 * clock, clears any latched SERR / initiator error state (hardware
 * bug workaround), and finally releases FGC leaving only the normal
 * clock running.  Each write is followed by a flushing read and a
 * 1us settle delay.  Caller must hold bp->lock with interrupts
 * disabled.
 */
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
249
/* Return the unit number of the ethernet core.
 *
 * The BCM4401 has a single ethernet core, so this is always 0.  The
 * address-match decoding that used to sit here inside "#if 0" has
 * been deleted: it was dead code and even referenced an undeclared
 * variable (`type`), so it could never have compiled as written.
 */
static int ssb_core_unit(struct b44 *bp)
{
	return 0;
}
274
275 static int ssb_is_core_up(struct b44 *bp)
276 {
277         return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
278                 == SBTMSLOW_CLOCK);
279 }
280
281 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
282 {
283         u32 val;
284
285         val  = ((u32) data[2]) << 24;
286         val |= ((u32) data[3]) << 16;
287         val |= ((u32) data[4]) <<  8;
288         val |= ((u32) data[5]) <<  0;
289         bw32(bp, B44_CAM_DATA_LO, val);
290         val = (CAM_DATA_HI_VALID |
291                (((u32) data[0]) << 8) |
292                (((u32) data[1]) << 0));
293         bw32(bp, B44_CAM_DATA_HI, val);
294         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295                             (index << CAM_CTRL_INDEX_SHIFT)));
296         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
297 }
298
/* Mask all chip interrupts.  Does not flush the posted write; use
 * b44_disable_ints() when the masking must be visible immediately.
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
303
/* Mask all chip interrupts and flush the posted write so the mask is
 * guaranteed to have reached the chip before we return.
 */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}
311
/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
316
317 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
318 {
319         int err;
320
321         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325                              (reg << MDIO_DATA_RA_SHIFT) |
326                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328         *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
329
330         return err;
331 }
332
333 static int b44_writephy(struct b44 *bp, int reg, u32 val)
334 {
335         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339                              (reg << MDIO_DATA_RA_SHIFT) |
340                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341                              (val & MDIO_DATA_DATA)));
342         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
343 }
344
345 /* miilib interface */
346 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347  * due to code existing before miilib use was added to this driver.
348  * Someone should remove this artificial driver limitation in
349  * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
350  */
351 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
352 {
353         u32 val;
354         struct b44 *bp = netdev_priv(dev);
355         int rc = b44_readphy(bp, location, &val);
356         if (rc)
357                 return 0xffffffff;
358         return val;
359 }
360
/* miilib write hook; errors from the MDIO write are ignored here,
 * matching the void miilib interface.
 */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	b44_writephy(netdev_priv(dev), location, val);
}
367
368 static int b44_phy_reset(struct b44 *bp)
369 {
370         u32 val;
371         int err;
372
373         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
374         if (err)
375                 return err;
376         udelay(100);
377         err = b44_readphy(bp, MII_BMCR, &val);
378         if (!err) {
379                 if (val & BMCR_RESET) {
380                         printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
381                                bp->dev->name);
382                         err = -ENODEV;
383                 }
384         }
385
386         return 0;
387 }
388
389 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
390 {
391         u32 val;
392
393         bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394         bp->flags |= pause_flags;
395
396         val = br32(bp, B44_RXCONFIG);
397         if (pause_flags & B44_FLAG_RX_PAUSE)
398                 val |= RXCONFIG_FLOW;
399         else
400                 val &= ~RXCONFIG_FLOW;
401         bw32(bp, B44_RXCONFIG, val);
402
403         val = br32(bp, B44_MAC_FLOW);
404         if (pause_flags & B44_FLAG_TX_PAUSE)
405                 val |= (MAC_FLOW_PAUSE_ENAB |
406                         (0xc0 & MAC_FLOW_RX_HI_WATER));
407         else
408                 val &= ~MAC_FLOW_PAUSE_ENAB;
409         bw32(bp, B44_MAC_FLOW, val);
410 }
411
412 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
413 {
414         u32 pause_enab = 0;
415
416         /* The driver supports only rx pause by default because
417            the b44 mac tx pause mechanism generates excessive
418            pause frames.
419            Use ethtool to turn on b44 tx pause if necessary.
420          */
421         if ((local & ADVERTISE_PAUSE_CAP) &&
422             (local & ADVERTISE_PAUSE_ASYM)){
423                 if ((remote & LPA_PAUSE_ASYM) &&
424                     !(remote & LPA_PAUSE_CAP))
425                         pause_enab |= B44_FLAG_RX_PAUSE;
426         }
427
428         __b44_set_flow_ctrl(bp, pause_enab);
429 }
430
/* Program the PHY according to bp->flags.
 *
 * First configures the activity/transmit LED control registers, then
 * either (re)starts autonegotiation with an advertisement mask built
 * from the B44_FLAG_ADV_* bits, or — when B44_FLAG_FORCE_LINK is set —
 * forces speed/duplex in BMCR and disables flow control entirely.
 * Returns 0 on success or a negative error from the MDIO accessors.
 * Caller must hold bp->lock.
 */
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	/* LED setup: mask the activity LED bits, enable the TX LED. */
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		/* Autonegotiate: advertise whatever the flags allow. */
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		/* Forced link: clear autoneg and set speed/duplex bits. */
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
490
491 static void b44_stats_update(struct b44 *bp)
492 {
493         unsigned long reg;
494         u32 *val;
495
496         val = &bp->hw_stats.tx_good_octets;
497         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498                 *val++ += br32(bp, reg);
499         }
500
501         /* Pad */
502         reg += 8*4UL;
503
504         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505                 *val++ += br32(bp, reg);
506         }
507 }
508
509 static void b44_link_report(struct b44 *bp)
510 {
511         if (!netif_carrier_ok(bp->dev)) {
512                 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
513         } else {
514                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
515                        bp->dev->name,
516                        (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517                        (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
518
519                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
520                        "%s for RX.\n",
521                        bp->dev->name,
522                        (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523                        (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
524         }
525 }
526
/* Poll the PHY and synchronize driver/MAC state with the link.
 *
 * Reads BMSR and the Broadcom auxiliary control register; on a valid
 * read (bmsr != 0xffff) it mirrors the PHY's speed/duplex into
 * bp->flags, and on a link transition it updates the MAC duplex bit,
 * renegotiates flow control (autoneg only), toggles the carrier, and
 * logs the change.  Remote-fault and jabber conditions are logged as
 * warnings.  Called from b44_timer() with bp->lock held.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		/* Track the PHY's current speed/duplex in our flags. */
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			/* Link came up: program MAC duplex to match. */
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			/* With autoneg, derive pause from both sides'
			 * advertisements; forced links disabled pause
			 * in b44_setup_phy().
			 */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
576
577 static void b44_timer(unsigned long __opaque)
578 {
579         struct b44 *bp = (struct b44 *) __opaque;
580
581         spin_lock_irq(&bp->lock);
582
583         b44_check_phy(bp);
584
585         b44_stats_update(bp);
586
587         spin_unlock_irq(&bp->lock);
588
589         bp->timer.expires = jiffies + HZ;
590         add_timer(&bp->timer);
591 }
592
593 static void b44_tx(struct b44 *bp)
594 {
595         u32 cur, cons;
596
597         cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598         cur /= sizeof(struct dma_desc);
599
600         /* XXX needs updating when NETIF_F_SG is supported */
601         for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602                 struct ring_info *rp = &bp->tx_buffers[cons];
603                 struct sk_buff *skb = rp->skb;
604
605                 BUG_ON(skb == NULL);
606
607                 pci_unmap_single(bp->pdev,
608                                  pci_unmap_addr(rp, mapping),
609                                  skb->len,
610                                  PCI_DMA_TODEVICE);
611                 rp->skb = NULL;
612                 dev_kfree_skb_irq(skb);
613         }
614
615         bp->tx_cons = cons;
616         if (netif_queue_stopped(bp->dev) &&
617             TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
618                 netif_wake_queue(bp->dev);
619
620         bw32(bp, B44_GPTIMER, 0);
621 }
622
623 /* Works like this.  This chip writes a 'struct rx_header" 30 bytes
624  * before the DMA address you give it.  So we allocate 30 more bytes
625  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626  * point the chip at 30 bytes past where the rx_header will go.
627  */
628 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
629 {
630         struct dma_desc *dp;
631         struct ring_info *src_map, *map;
632         struct rx_header *rh;
633         struct sk_buff *skb;
634         dma_addr_t mapping;
635         int dest_idx;
636         u32 ctrl;
637
638         src_map = NULL;
639         if (src_idx >= 0)
640                 src_map = &bp->rx_buffers[src_idx];
641         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642         map = &bp->rx_buffers[dest_idx];
643         skb = dev_alloc_skb(RX_PKT_BUF_SZ);
644         if (skb == NULL)
645                 return -ENOMEM;
646
647         mapping = pci_map_single(bp->pdev, skb->data,
648                                  RX_PKT_BUF_SZ,
649                                  PCI_DMA_FROMDEVICE);
650
651         /* Hardware bug work-around, the chip is unable to do PCI DMA
652            to/from anything above 1GB :-( */
653         if (dma_mapping_error(mapping) ||
654                 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
655                 /* Sigh... */
656                 if (!dma_mapping_error(mapping))
657                         pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
658                 dev_kfree_skb_any(skb);
659                 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
660                 if (skb == NULL)
661                         return -ENOMEM;
662                 mapping = pci_map_single(bp->pdev, skb->data,
663                                          RX_PKT_BUF_SZ,
664                                          PCI_DMA_FROMDEVICE);
665                 if (dma_mapping_error(mapping) ||
666                         mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667                         if (!dma_mapping_error(mapping))
668                                 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
669                         dev_kfree_skb_any(skb);
670                         return -ENOMEM;
671                 }
672         }
673
674         skb->dev = bp->dev;
675         skb_reserve(skb, bp->rx_offset);
676
677         rh = (struct rx_header *)
678                 (skb->data - bp->rx_offset);
679         rh->len = 0;
680         rh->flags = 0;
681
682         map->skb = skb;
683         pci_unmap_addr_set(map, mapping, mapping);
684
685         if (src_map != NULL)
686                 src_map->skb = NULL;
687
688         ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
689         if (dest_idx == (B44_RX_RING_SIZE - 1))
690                 ctrl |= DESC_CTRL_EOT;
691
692         dp = &bp->rx_ring[dest_idx];
693         dp->ctrl = cpu_to_le32(ctrl);
694         dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
695
696         if (bp->flags & B44_FLAG_RX_RING_HACK)
697                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
698                                              dest_idx * sizeof(dp),
699                                              DMA_BIDIRECTIONAL);
700
701         return RX_PKT_BUF_SZ;
702 }
703
704 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
705 {
706         struct dma_desc *src_desc, *dest_desc;
707         struct ring_info *src_map, *dest_map;
708         struct rx_header *rh;
709         int dest_idx;
710         u32 ctrl;
711
712         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
713         dest_desc = &bp->rx_ring[dest_idx];
714         dest_map = &bp->rx_buffers[dest_idx];
715         src_desc = &bp->rx_ring[src_idx];
716         src_map = &bp->rx_buffers[src_idx];
717
718         dest_map->skb = src_map->skb;
719         rh = (struct rx_header *) src_map->skb->data;
720         rh->len = 0;
721         rh->flags = 0;
722         pci_unmap_addr_set(dest_map, mapping,
723                            pci_unmap_addr(src_map, mapping));
724
725         if (bp->flags & B44_FLAG_RX_RING_HACK)
726                 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
727                                           src_idx * sizeof(src_desc),
728                                           DMA_BIDIRECTIONAL);
729
730         ctrl = src_desc->ctrl;
731         if (dest_idx == (B44_RX_RING_SIZE - 1))
732                 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
733         else
734                 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
735
736         dest_desc->ctrl = ctrl;
737         dest_desc->addr = src_desc->addr;
738
739         src_map->skb = NULL;
740
741         if (bp->flags & B44_FLAG_RX_RING_HACK)
742                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
743                                              dest_idx * sizeof(dest_desc),
744                                              DMA_BIDIRECTIONAL);
745
746         pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
747                                        RX_PKT_BUF_SZ,
748                                        PCI_DMA_FROMDEVICE);
749 }
750
751 static int b44_rx(struct b44 *bp, int budget)
752 {
753         int received;
754         u32 cons, prod;
755
756         received = 0;
757         prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
758         prod /= sizeof(struct dma_desc);
759         cons = bp->rx_cons;
760
761         while (cons != prod && budget > 0) {
762                 struct ring_info *rp = &bp->rx_buffers[cons];
763                 struct sk_buff *skb = rp->skb;
764                 dma_addr_t map = pci_unmap_addr(rp, mapping);
765                 struct rx_header *rh;
766                 u16 len;
767
768                 pci_dma_sync_single_for_cpu(bp->pdev, map,
769                                             RX_PKT_BUF_SZ,
770                                             PCI_DMA_FROMDEVICE);
771                 rh = (struct rx_header *) skb->data;
772                 len = cpu_to_le16(rh->len);
773                 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
774                     (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
775                 drop_it:
776                         b44_recycle_rx(bp, cons, bp->rx_prod);
777                 drop_it_no_recycle:
778                         bp->stats.rx_dropped++;
779                         goto next_pkt;
780                 }
781
782                 if (len == 0) {
783                         int i = 0;
784
785                         do {
786                                 udelay(2);
787                                 barrier();
788                                 len = cpu_to_le16(rh->len);
789                         } while (len == 0 && i++ < 5);
790                         if (len == 0)
791                                 goto drop_it;
792                 }
793
794                 /* Omit CRC. */
795                 len -= 4;
796
797                 if (len > RX_COPY_THRESHOLD) {
798                         int skb_size;
799                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
800                         if (skb_size < 0)
801                                 goto drop_it;
802                         pci_unmap_single(bp->pdev, map,
803                                          skb_size, PCI_DMA_FROMDEVICE);
804                         /* Leave out rx_header */
805                         skb_put(skb, len+bp->rx_offset);
806                         skb_pull(skb,bp->rx_offset);
807                 } else {
808                         struct sk_buff *copy_skb;
809
810                         b44_recycle_rx(bp, cons, bp->rx_prod);
811                         copy_skb = dev_alloc_skb(len + 2);
812                         if (copy_skb == NULL)
813                                 goto drop_it_no_recycle;
814
815                         copy_skb->dev = bp->dev;
816                         skb_reserve(copy_skb, 2);
817                         skb_put(copy_skb, len);
818                         /* DMA sync done above, copy just the actual packet */
819                         memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
820
821                         skb = copy_skb;
822                 }
823                 skb->ip_summed = CHECKSUM_NONE;
824                 skb->protocol = eth_type_trans(skb, bp->dev);
825                 netif_receive_skb(skb);
826                 bp->dev->last_rx = jiffies;
827                 received++;
828                 budget--;
829         next_pkt:
830                 bp->rx_prod = (bp->rx_prod + 1) &
831                         (B44_RX_RING_SIZE - 1);
832                 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
833         }
834
835         bp->rx_cons = cons;
836         bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
837
838         return received;
839 }
840
/* Old-style NAPI poll handler.
 *
 * Reaps TX completions (under bp->lock) if the saved istat shows TX
 * or GP-timer work, then processes up to quota/budget RX packets.
 * On ISTAT_ERRORS the chip is fully halted and reinitialized.
 * Returns 0 when all work is done (poll removed from the list and
 * interrupts re-enabled) or 1 to be polled again.
 */
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		/* Honor the smaller of the global budget and our quota. */
		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		/* Budget exhausted: more RX work may remain. */
		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		/* Fatal error: reset rings and hardware from scratch. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}
889
/* Hard interrupt handler: snapshot the pending status, disable further
 * chip interrupts and hand the work off to NAPI polling (b44_poll).
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register does not appear to gate the status
	 * bits, so filter the status by the mask ourselves.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		/* Ack exactly the bits we saw; the readback flushes the
		 * posted writes above.
		 */
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
933
/* dev->tx_timeout hook: the transmitter has been stuck for B44_TX_TIMEOUT.
 * Recover with a full halt/re-init cycle, then restart the tx queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
953
/* hard_start_xmit hook: map one skb and hand it to the tx DMA engine.
 * The chip cannot DMA above B44_DMA_MASK; buffers that map above that
 * limit are copied into a GFP_DMA bounce skb first.  Returns
 * NETDEV_TX_OK on success, NETDEV_TX_BUSY when no descriptor or bounce
 * buffer could be obtained (the stack will requeue the skb).
 */
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
					     GFP_ATOMIC|GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping,
					 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		/* Copy the payload and transmit the bounce skb instead. */
		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	/* Single-fragment packet: start and end of frame in one
	 * descriptor, interrupt on completion.
	 */
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last slot: chip wraps here */

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	/* Streaming-mapped ring (fallback allocation): push the new
	 * descriptor out to the device.
	 */
	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor contents must be visible before the producer
	 * pointer update below.
	 */
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	/* Hardware-erratum workarounds: some chips need the pointer
	 * written twice, others a readback to avoid write reordering.
	 */
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
1042
/* dev->change_mtu hook.  If the interface is down just record the new
 * MTU; otherwise halt and re-init the chip, since the maximum frame
 * length is programmed into the RXMAXLEN/TXMAXLEN registers
 * (see b44_init_hw).
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1069
1070 /* Free up pending packets in all rx/tx rings.
1071  *
1072  * The chip has been shut down and the driver detached from
1073  * the networking, so no interrupts or new tx packets will
1074  * end up in the driver.  bp->lock is not held and we are not
1075  * in an interrupt context and thus may sleep.
1076  */
1077 static void b44_free_rings(struct b44 *bp)
1078 {
1079         struct ring_info *rp;
1080         int i;
1081
1082         for (i = 0; i < B44_RX_RING_SIZE; i++) {
1083                 rp = &bp->rx_buffers[i];
1084
1085                 if (rp->skb == NULL)
1086                         continue;
1087                 pci_unmap_single(bp->pdev,
1088                                  pci_unmap_addr(rp, mapping),
1089                                  RX_PKT_BUF_SZ,
1090                                  PCI_DMA_FROMDEVICE);
1091                 dev_kfree_skb_any(rp->skb);
1092                 rp->skb = NULL;
1093         }
1094
1095         /* XXX needs changes once NETIF_F_SG is set... */
1096         for (i = 0; i < B44_TX_RING_SIZE; i++) {
1097                 rp = &bp->tx_buffers[i];
1098
1099                 if (rp->skb == NULL)
1100                         continue;
1101                 pci_unmap_single(bp->pdev,
1102                                  pci_unmap_addr(rp, mapping),
1103                                  rp->skb->len,
1104                                  PCI_DMA_TODEVICE);
1105                 dev_kfree_skb_any(rp->skb);
1106                 rp->skb = NULL;
1107         }
1108 }
1109
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	/* Rings allocated via the kmalloc+dma_map_single fallback are
	 * streaming mappings: push the cleared tables out to the device.
	 */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_TODEVICE);

	/* Post rx_pending receive buffers; stop early if allocation fails. */
	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
1140
1141 /*
1142  * Must not be invoked with interrupt sources disabled and
1143  * the hardware shutdown down.
1144  */
1145 static void b44_free_consistent(struct b44 *bp)
1146 {
1147         kfree(bp->rx_buffers);
1148         bp->rx_buffers = NULL;
1149         kfree(bp->tx_buffers);
1150         bp->tx_buffers = NULL;
1151         if (bp->rx_ring) {
1152                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1153                         dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1154                                          DMA_TABLE_BYTES,
1155                                          DMA_BIDIRECTIONAL);
1156                         kfree(bp->rx_ring);
1157                 } else
1158                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1159                                             bp->rx_ring, bp->rx_ring_dma);
1160                 bp->rx_ring = NULL;
1161                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1162         }
1163         if (bp->tx_ring) {
1164                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1165                         dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1166                                          DMA_TABLE_BYTES,
1167                                          DMA_TO_DEVICE);
1168                         kfree(bp->tx_ring);
1169                 } else
1170                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1171                                             bp->tx_ring, bp->tx_ring_dma);
1172                 bp->tx_ring = NULL;
1173                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1174         }
1175 }
1176
1177 /*
1178  * Must not be invoked with interrupt sources disabled and
1179  * the hardware shutdown down.  Can sleep.
1180  */
1181 static int b44_alloc_consistent(struct b44 *bp)
1182 {
1183         int size;
1184
1185         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1186         bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1187         if (!bp->rx_buffers)
1188                 goto out_err;
1189
1190         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1191         bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1192         if (!bp->tx_buffers)
1193                 goto out_err;
1194
1195         size = DMA_TABLE_BYTES;
1196         bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1197         if (!bp->rx_ring) {
1198                 /* Allocation may have failed due to pci_alloc_consistent
1199                    insisting on use of GFP_DMA, which is more restrictive
1200                    than necessary...  */
1201                 struct dma_desc *rx_ring;
1202                 dma_addr_t rx_ring_dma;
1203
1204                 rx_ring = kzalloc(size, GFP_KERNEL);
1205                 if (!rx_ring)
1206                         goto out_err;
1207
1208                 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1209                                              DMA_TABLE_BYTES,
1210                                              DMA_BIDIRECTIONAL);
1211
1212                 if (dma_mapping_error(rx_ring_dma) ||
1213                         rx_ring_dma + size > B44_DMA_MASK) {
1214                         kfree(rx_ring);
1215                         goto out_err;
1216                 }
1217
1218                 bp->rx_ring = rx_ring;
1219                 bp->rx_ring_dma = rx_ring_dma;
1220                 bp->flags |= B44_FLAG_RX_RING_HACK;
1221         }
1222
1223         bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1224         if (!bp->tx_ring) {
1225                 /* Allocation may have failed due to pci_alloc_consistent
1226                    insisting on use of GFP_DMA, which is more restrictive
1227                    than necessary...  */
1228                 struct dma_desc *tx_ring;
1229                 dma_addr_t tx_ring_dma;
1230
1231                 tx_ring = kzalloc(size, GFP_KERNEL);
1232                 if (!tx_ring)
1233                         goto out_err;
1234
1235                 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1236                                              DMA_TABLE_BYTES,
1237                                              DMA_TO_DEVICE);
1238
1239                 if (dma_mapping_error(tx_ring_dma) ||
1240                         tx_ring_dma + size > B44_DMA_MASK) {
1241                         kfree(tx_ring);
1242                         goto out_err;
1243                 }
1244
1245                 bp->tx_ring = tx_ring;
1246                 bp->tx_ring_dma = tx_ring_dma;
1247                 bp->flags |= B44_FLAG_TX_RING_HACK;
1248         }
1249
1250         return 0;
1251
1252 out_err:
1253         b44_free_consistent(bp);
1254         return -ENOMEM;
1255 }
1256
1257 /* bp->lock is held. */
1258 static void b44_clear_stats(struct b44 *bp)
1259 {
1260         unsigned long reg;
1261
1262         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1263         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1264                 br32(bp, reg);
1265         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1266                 br32(bp, reg);
1267 }
1268
/* bp->lock is held.  Quiesce any running DMA, reset the SSB core, clear
 * the MIB counters and make the PHY accessible again.
 */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		/* Core is running: disable the MAC and both DMA engines
		 * (waiting for them to go idle) before resetting, so no
		 * DMA is in flight across the reset.
		 */
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		/* Core not up yet: do the SSB PCI setup for this unit. */
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	/* DEVCTRL_IPP indicates an internal PHY; select the external
	 * path when it is absent.
	 */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		/* Take the internal PHY out of reset if necessary. */
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
1314
/* bp->lock is held.  Mask interrupts first so the reset below cannot
 * race with the interrupt handler, then reset the chip.
 */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
1321
/* bp->lock is held.  Load dev_addr into CAM slot 0 and enable the CAM.
 * In promiscuous mode the CAM is left disabled (the PROMISC rxconfig
 * bit is handled in __b44_set_rx_mode).
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1334
1335 static int b44_set_mac_addr(struct net_device *dev, void *p)
1336 {
1337         struct b44 *bp = netdev_priv(dev);
1338         struct sockaddr *addr = p;
1339
1340         if (netif_running(dev))
1341                 return -EBUSY;
1342
1343         if (!is_valid_ether_addr(addr->sa_data))
1344                 return -EINVAL;
1345
1346         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1347
1348         spin_lock_irq(&bp->lock);
1349         __b44_set_mac_addr(bp);
1350         spin_unlock_irq(&bp->lock);
1351
1352         return 0;
1353 }
1354
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp)
{
	u32 val;

	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	/* Point both DMA engines at their rings (bus addresses) and
	 * enable them; rx descriptors skip rx_offset header bytes.
	 */
	bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	/* Hand the chip the rx descriptors posted by b44_init_rings(). */
	bw32(bp, B44_DMARX_PTR, bp->rx_pending);
	bp->rx_prod = bp->rx_pending;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
1393
/* dev->open hook: allocate the rings, bring the hardware up, hook the
 * (shared) IRQ and arm the periodic driver timer.  On IRQ failure all
 * resources acquired so far are torn down again.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		goto out;

	b44_init_rings(bp);
	b44_init_hw(bp);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (unlikely(err < 0)) {
		b44_chip_reset(bp);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	/* Arm the periodic timer (b44_timer, first fires in one second). */
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}
1427
#if 0
/* Debug helper (compiled out): dump a little PCI state to the log. */
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);

}
#endif
1439
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	/* Mask the line and invoke the interrupt handler directly. */
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
1452
/* dev->stop hook: quiesce polling and the timer, halt the chip and
 * release everything acquired in b44_open().
 */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Make sure our poll routine cannot run while we tear down. */
	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	b44_free_consistent(bp);

	return 0;
}
1482
1483 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1484 {
1485         struct b44 *bp = netdev_priv(dev);
1486         struct net_device_stats *nstat = &bp->stats;
1487         struct b44_hw_stats *hwstat = &bp->hw_stats;
1488
1489         /* Convert HW stats into netdevice stats. */
1490         nstat->rx_packets = hwstat->rx_pkts;
1491         nstat->tx_packets = hwstat->tx_pkts;
1492         nstat->rx_bytes   = hwstat->rx_octets;
1493         nstat->tx_bytes   = hwstat->tx_octets;
1494         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1495                              hwstat->tx_oversize_pkts +
1496                              hwstat->tx_underruns +
1497                              hwstat->tx_excessive_cols +
1498                              hwstat->tx_late_cols);
1499         nstat->multicast  = hwstat->tx_multicast_pkts;
1500         nstat->collisions = hwstat->tx_total_cols;
1501
1502         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1503                                    hwstat->rx_undersize);
1504         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1505         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1506         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1507         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1508                                    hwstat->rx_oversize_pkts +
1509                                    hwstat->rx_missed_pkts +
1510                                    hwstat->rx_crc_align_errs +
1511                                    hwstat->rx_undersize +
1512                                    hwstat->rx_crc_errs +
1513                                    hwstat->rx_align_errs +
1514                                    hwstat->rx_symbol_errs);
1515
1516         nstat->tx_aborted_errors = hwstat->tx_underruns;
1517 #if 0
1518         /* Carrier lost counter seems to be broken for some devices */
1519         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1520 #endif
1521
1522         return nstat;
1523 }
1524
1525 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1526 {
1527         struct dev_mc_list *mclist;
1528         int i, num_ents;
1529
1530         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1531         mclist = dev->mc_list;
1532         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1533                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1534         }
1535         return i+1;
1536 }
1537
/* bp->lock is held.  Program the receive filter: either promiscuous
 * mode, or CAM slot 0 = our MAC plus the multicast list (or the
 * ALLMULTI bit), with all remaining CAM slots cleared.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 0;

		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		/* Wipe every CAM slot we did not just fill. */
		for (; i < 64; i++) {
			__b44_cam_write(bp, zero, i);
		}
		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1567
/* dev->set_multicast_list hook: take the lock and reprogram the filter. */
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
1576
1577 static u32 b44_get_msglevel(struct net_device *dev)
1578 {
1579         struct b44 *bp = netdev_priv(dev);
1580         return bp->msg_enable;
1581 }
1582
1583 static void b44_set_msglevel(struct net_device *dev, u32 value)
1584 {
1585         struct b44 *bp = netdev_priv(dev);
1586         bp->msg_enable = value;
1587 }
1588
1589 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1590 {
1591         struct b44 *bp = netdev_priv(dev);
1592         struct pci_dev *pci_dev = bp->pdev;
1593
1594         strcpy (info->driver, DRV_MODULE_NAME);
1595         strcpy (info->version, DRV_MODULE_VERSION);
1596         strcpy (info->bus_info, pci_name(pci_dev));
1597 }
1598
/* ethtool nway_reset hook: restart autonegotiation.  Returns -EINVAL
 * when autoneg is not currently enabled in the PHY's BMCR.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* NOTE(review): BMCR is read twice here -- presumably to flush a
	 * stale value from the PHY; confirm before removing either read.
	 */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
1618
/* ethtool get_settings hook: report link capabilities and the current
 * configuration, reconstructed from bp->flags.
 */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* 10/100 MII only -- no gigabit modes on this hardware. */
	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	/* Interface down: speed/duplex are meaningless, report unknown. */
	if (!netif_running(dev)){
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
1660
/* ethtool set_settings hook: switch between autonegotiation (optionally
 * with a subset of advertised modes) and a forced speed/duplex.  The new
 * configuration is recorded in bp->flags and applied to the PHY right
 * away when the interface is up.
 */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
			return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: drop any forced mode, then advertise either
		 * all 10/100 modes (advertising == 0) or the requested
		 * subset.
		 */
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		/* Forced mode: record exactly the speed/duplex asked for. */
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1719
1720 static void b44_get_ringparam(struct net_device *dev,
1721                               struct ethtool_ringparam *ering)
1722 {
1723         struct b44 *bp = netdev_priv(dev);
1724
1725         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1726         ering->rx_pending = bp->rx_pending;
1727
1728         /* XXX ethtool lacks a tx_max_pending, oops... */
1729 }
1730
1731 static int b44_set_ringparam(struct net_device *dev,
1732                              struct ethtool_ringparam *ering)
1733 {
1734         struct b44 *bp = netdev_priv(dev);
1735
1736         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1737             (ering->rx_mini_pending != 0) ||
1738             (ering->rx_jumbo_pending != 0) ||
1739             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1740                 return -EINVAL;
1741
1742         spin_lock_irq(&bp->lock);
1743
1744         bp->rx_pending = ering->rx_pending;
1745         bp->tx_pending = ering->tx_pending;
1746
1747         b44_halt(bp);
1748         b44_init_rings(bp);
1749         b44_init_hw(bp);
1750         netif_wake_queue(bp->dev);
1751         spin_unlock_irq(&bp->lock);
1752
1753         b44_enable_ints(bp);
1754
1755         return 0;
1756 }
1757
1758 static void b44_get_pauseparam(struct net_device *dev,
1759                                 struct ethtool_pauseparam *epause)
1760 {
1761         struct b44 *bp = netdev_priv(dev);
1762
1763         epause->autoneg =
1764                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1765         epause->rx_pause =
1766                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1767         epause->tx_pause =
1768                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1769 }
1770
/* ethtool set_pauseparam: update the driver's pause-frame flags and
 * apply them to the hardware.  Always returns 0.
 */
static int b44_set_pauseparam(struct net_device *dev,
                                struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		/* Autonegotiated pause: restart the chip so the new
		 * advertisement is picked up. */
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
	} else {
		/* Forced pause settings can be programmed directly. */
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1802
1803 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1804 {
1805         switch(stringset) {
1806         case ETH_SS_STATS:
1807                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1808                 break;
1809         }
1810 }
1811
/* ethtool get_stats_count: number of statistics entries exported. */
static int b44_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(b44_gstrings);
}
1816
/* ethtool get_ethtool_stats: refresh and copy out the MIB counters.
 *
 * NOTE(review): this walks bp->hw_stats as a flat array of u32 starting
 * at tx_good_octets, assuming the struct members are declared in the
 * same order as b44_gstrings with no padding -- verify against b44.h
 * before reordering either one.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	/* Pull the latest hardware counters into hw_stats first. */
	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
1833
/* ethtool operations supported by this driver. */
static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo            = b44_get_drvinfo,
	.get_settings           = b44_get_settings,
	.set_settings           = b44_set_settings,
	.nway_reset             = b44_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_ringparam          = b44_get_ringparam,
	.set_ringparam          = b44_set_ringparam,
	.get_pauseparam         = b44_get_pauseparam,
	.set_pauseparam         = b44_set_pauseparam,
	.get_msglevel           = b44_get_msglevel,
	.set_msglevel           = b44_set_msglevel,
	.get_strings            = b44_get_strings,
	.get_stats_count        = b44_get_stats_count,
	.get_ethtool_stats      = b44_get_ethtool_stats,
	.get_perm_addr          = ethtool_op_get_perm_addr,
};
1851
1852 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1853 {
1854         struct mii_ioctl_data *data = if_mii(ifr);
1855         struct b44 *bp = netdev_priv(dev);
1856         int err = -EINVAL;
1857
1858         if (!netif_running(dev))
1859                 goto out;
1860
1861         spin_lock_irq(&bp->lock);
1862         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1863         spin_unlock_irq(&bp->lock);
1864 out:
1865         return err;
1866 }
1867
1868 /* Read 128-bytes of EEPROM. */
1869 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1870 {
1871         long i;
1872         u16 *ptr = (u16 *) data;
1873
1874         for (i = 0; i < 128; i += 2)
1875                 ptr[i / 2] = readw(bp->regs + 4096 + i);
1876
1877         return 0;
1878 }
1879
/* Read chip-invariant configuration (MAC address, PHY address) from the
 * EEPROM and set up driver defaults.  Returns 0 or a negative errno.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	/* The EEPROM stores the MAC address with the two bytes of each
	 * 16-bit word swapped, hence the pairwise-reversed indices. */
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */
out:
	return err;
}
1921
/* PCI probe hook: enable the device, map its registers, allocate and
 * register the net_device, and read chip invariants.  All failure paths
 * unwind through the goto-cleanup chain at the bottom.
 */
static int __devinit b44_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	/* Print the driver banner once, on the first probed device. */
	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* The chip is programmed through BAR 0, which must be MMIO. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Both streaming and coherent DMA must fit the chip's mask. */
	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev,&pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	/* Wire up the netdevice callbacks. */
	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;	/* NAPI poll weight */
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	/* Link state is unknown until the PHY reports in. */
	netif_carrier_off(dev);

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
2084
/* PCI remove hook: tear everything down in reverse order of
 * b44_init_one().  unregister_netdev() must come first so no callbacks
 * can run while resources are being released.
 */
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(bp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
2097
/* PCI suspend hook: quiesce the chip, detach the interface, and release
 * the IRQ.  Nothing to do if the interface is down.
 */
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		 return 0;

	/* Stop the periodic PHY/link timer before halting the chip. */
	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	pci_disable_device(pdev);
	return 0;
}
2121
2122 static int b44_resume(struct pci_dev *pdev)
2123 {
2124         struct net_device *dev = pci_get_drvdata(pdev);
2125         struct b44 *bp = netdev_priv(dev);
2126
2127         pci_restore_state(pdev);
2128         pci_enable_device(pdev);
2129         pci_set_master(pdev);
2130
2131         if (!netif_running(dev))
2132                 return 0;
2133
2134         if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2135                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2136
2137         spin_lock_irq(&bp->lock);
2138
2139         b44_init_rings(bp);
2140         b44_init_hw(bp);
2141         netif_device_attach(bp->dev);
2142         spin_unlock_irq(&bp->lock);
2143
2144         bp->timer.expires = jiffies + HZ;
2145         add_timer(&bp->timer);
2146
2147         b44_enable_ints(bp);
2148         netif_wake_queue(dev);
2149         return 0;
2150 }
2151
/* PCI driver glue for the Broadcom 4400 family. */
static struct pci_driver b44_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = b44_pci_tbl,
	.probe          = b44_init_one,
	.remove         = __devexit_p(b44_remove_one),
	.suspend        = b44_suspend,
	.resume         = b44_resume,
};
2160
/* Module init: derive the DMA descriptor sync parameters from the CPU
 * cache line size, then register the PCI driver.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	return pci_module_init(&b44_driver);
}
2171
/* Module exit: unregister the PCI driver (detaches all devices). */
static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}
2176
/* Module entry/exit points. */
module_init(b44_init);
module_exit(b44_cleanup);
2179